From: Peter Munch
Date: Wed, 21 Oct 2020 14:26:00 +0000 (+0200)
Subject: Introduce MPI-3 shared-memory capabilities in L:d:V
X-Git-Tag: v9.3.0-rc1~957^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F11074%2Fhead;p=dealii.git

Introduce MPI-3 shared-memory capabilities in L:d:V
---

diff --git a/include/deal.II/base/memory_space_data.h b/include/deal.II/base/memory_space_data.h
index da957755d6..e2feb375db 100644
--- a/include/deal.II/base/memory_space_data.h
+++ b/include/deal.II/base/memory_space_data.h
@@ -75,6 +75,11 @@ namespace MemorySpace
      * Pointer to data on the device.
      */
     std::unique_ptr<Number[]> values_dev;
+
+    /**
+     * Pointers to the data of the processes sharing the same memory.
+     */
+    std::vector<ArrayView<const Number>> values_sm;
   };
 
 
@@ -118,6 +123,8 @@ namespace MemorySpace
     // This is not used but it allows to simplify the code until we start using
     // CUDA-aware MPI.
     std::unique_ptr<Number[]> values_dev;
+
+    std::vector<ArrayView<const Number>> values_sm;
   };
 
 
@@ -165,6 +172,11 @@ namespace MemorySpace
     std::unique_ptr<Number[], std::function<void(Number *)>> values;
     std::unique_ptr<Number[]> values_dev;
+
+    /**
+     * This is currently not used.
+     */
+    std::vector<ArrayView<const Number>> values_sm;
   };
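The new values_sm member holds one ArrayView per process of the shared-memory communicator, each pointing directly into the value storage of that process. As a minimal sketch of how such a field is meant to be read (the helper print_shared_values and its plain std::vector argument are illustrative only and are not part of this patch):

    // Hypothetical helper, not part of the patch: read the values that the
    // processes of the shared-memory communicator expose through values_sm.
    #include <deal.II/base/array_view.h>

    #include <iostream>
    #include <vector>

    template <typename Number>
    void
    print_shared_values(
      const std::vector<dealii::ArrayView<const Number>> &values_sm)
    {
      // One ArrayView per process of comm_sm; entry i points directly into
      // the memory owned by rank i of the shared-memory communicator.
      for (unsigned int rank = 0; rank < values_sm.size(); ++rank)
        for (const Number &value : values_sm[rank])
          std::cout << value << ' ';
      std::cout << std::endl;
    }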

diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h
index 904d337628..0b97e7e36b 100644
--- a/include/deal.II/lac/la_parallel_vector.h
+++ b/include/deal.II/lac/la_parallel_vector.h
@@ -218,6 +218,25 @@ namespace LinearAlgebra
    * device_id = my_rank % n_devices;
    * cudaSetDevice(device_id);
    *
+   *
+   * <h4>MPI-3 shared-memory support</h4>
+   *
+   * In Host mode, this class allows to use MPI-3 shared-memory features
+   * by providing a separate MPI communicator that consists of processes on
+   * the same shared-memory domain. By calling
+   * vector.shared_vector_data();
+   *
+   * users have read-only access to both locally-owned and ghost values of
+   * processes combined in the shared-memory communicator (@p comm_sm in
+   * reinit()).
+   *
+   * You can create a communicator consisting of all processes on
+   * the same shared-memory domain with:
+   * MPI_Comm comm_sm;
+   * MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL,
+   *                     &comm_sm);
+   *
+   *
    * @see CUDAWrappers
    */
   template <typename Number, typename MemorySpaceType = MemorySpace::Host>
@@ -291,7 +310,7 @@ namespace LinearAlgebra
     /**
      * Create the vector based on the parallel partitioning described in @p
-     * partitioner. The input argument is a shared pointer, which store the
+     * partitioner. The input argument is a shared pointer, which stores the
      * partitioner data only once and share it between several vectors with
      * the same layout.
      */
     template
@@ -354,18 +373,29 @@ namespace LinearAlgebra
     /**
      * Initialize the vector given to the parallel partitioning described in
-     * @p partitioner. The input argument is a shared pointer, which store
+     * @p partitioner. The input argument is a shared pointer, which stores
      * the partitioner data only once and share it between several vectors
      * with the same layout.
+     *
+     * The optional argument @p comm_sm, which consists of processes on
+     * the same shared-memory domain, allows users have read-only access to
+     * both locally-owned and ghost values of processes combined in the
+     * shared-memory communicator.
      */
     void
     reinit(
-      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+      const MPI_Comm &comm_sm = MPI_COMM_SELF);
 
     /**
      * Initialize vector with @p local_size locally-owned and @p ghost_size
      * ghost degrees of freedoms.
+     *
+     * The optional argument @p comm_sm, which consists of processes on
+     * the same shared-memory domain, allows users have read-only access to
+     * both locally-owned and ghost values of processes combined in the
+     * shared-memory communicator.
+     *
      * @note In the created underlying partitioner, the local index range is
      * translated to global indices in an ascending and one-to-one fashion,
      * i.e., the indices of process $p$ sit exactly between the indices of
@@ -378,7 +408,8 @@
     void
     reinit(const types::global_dof_index local_size,
            const types::global_dof_index ghost_size,
-           const MPI_Comm comm);
+           const MPI_Comm & comm,
+           const MPI_Comm & comm_sm = MPI_COMM_SELF);
 
     /**
      * Swap the contents of this vector and the other vector @p v. One could
@@ -1138,6 +1169,13 @@
     void
     set_ghost_state(const bool ghosted) const;
 
+    /**
+     * Get pointers to the beginning of the values of the other
+     * processes of the same shared-memory domain.
+     */
+    const std::vector<ArrayView<const Number>> &
+    shared_vector_data() const;
+
     //@}
 
     /**
@@ -1312,6 +1350,11 @@
      */
     mutable std::mutex mutex;
 
+    /**
+     * Communicator to be used for the shared-memory domain.
+     */
+    MPI_Comm comm_sm;
+
     /**
      * A helper function that clears the compress_requests and
      * update_ghost_values_requests field. Used in reinit() functions.
@@ -1323,7 +1366,8 @@
      * A helper function that is used to resize the val array.
      */
     void
-    resize_val(const size_type new_allocated_size);
+    resize_val(const size_type new_allocated_size,
+               const MPI_Comm &comm_sm = MPI_COMM_SELF);
 
     // Make all other vector types friends.
     template <typename Number2, typename MemorySpace2>
@@ -1515,6 +1559,15 @@ namespace LinearAlgebra
 
 
 
+  template <typename Number, typename MemorySpaceType>
+  const std::vector<ArrayView<const Number>> &
+  Vector<Number, MemorySpaceType>::shared_vector_data() const
+  {
+    return data.values_sm;
+  }
+
+
+
   template <typename Number, typename MemorySpaceType>
   inline Number
   Vector<Number, MemorySpaceType>::operator()(const size_type global_index) const
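Taken together, the header changes above suggest the following usage pattern. This is only a sketch under the assumption that a Utilities::MPI::Partitioner has already been set up; the function name example() and the variable names are made up and do not appear in the patch:

    #include <deal.II/base/array_view.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/base/partitioner.h>
    #include <deal.II/lac/la_parallel_vector.h>

    #include <memory>

    using namespace dealii;

    void
    example(
      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
    {
      const MPI_Comm comm = partitioner->get_mpi_communicator();

      // Split the global communicator into shared-memory domains, as in the
      // new class documentation.
      MPI_Comm comm_sm;
      MPI_Comm_split_type(comm,
                          MPI_COMM_TYPE_SHARED,
                          Utilities::MPI::this_mpi_process(comm),
                          MPI_INFO_NULL,
                          &comm_sm);

      {
        // Set up the vector with the additional shared-memory communicator.
        LinearAlgebra::distributed::Vector<double> vec;
        vec.reinit(partitioner, comm_sm);

        // Read-only access to the locally-owned and ghost values of all
        // processes of comm_sm; entry i belongs to rank i of comm_sm.
        const std::vector<ArrayView<const double>> &data =
          vec.shared_vector_data();
        (void)data;
      }

      // The vector (and with it the underlying MPI window) has been
      // destroyed, so the shared-memory communicator can be freed.
      MPI_Comm_free(&comm_sm);
    }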
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index 6eed0cbbe8..24a1864fa7 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -94,7 +94,8 @@ namespace LinearAlgebra
         const types::global_dof_index /*new_alloc_size*/,
         types::global_dof_index & /*allocated_size*/,
         ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
-          & /*data*/)
+          & /*data*/,
+        const MPI_Comm & /*comm_sm*/)
       {}
 
       static void
@@ -128,27 +129,132 @@
      resize_val(const types::global_dof_index new_alloc_size,
                 types::global_dof_index &     allocated_size,
                 ::dealii::MemorySpace::
-                  MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
+                  MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data,
+                const MPI_Comm &comm_shared)
      {
-        if (new_alloc_size > allocated_size)
+        if (comm_shared == MPI_COMM_SELF)
          {
-            Assert(((allocated_size > 0 && data.values != nullptr) ||
-                    data.values == nullptr),
-                   ExcInternalError());
+            if (new_alloc_size > allocated_size)
+              {
+                Assert(((allocated_size > 0 && data.values != nullptr) ||
+                        data.values == nullptr),
+                       ExcInternalError());
 
-            Number *new_val;
-            Utilities::System::posix_memalign(
-              reinterpret_cast<void **>(&new_val),
-              64,
-              sizeof(Number) * new_alloc_size);
-            data.values = {new_val, [](Number *data) { std::free(data); }};
+                Number *new_val;
+                Utilities::System::posix_memalign(
+                  reinterpret_cast<void **>(&new_val),
+                  64,
+                  sizeof(Number) * new_alloc_size);
+                data.values = {new_val,
+                               [](Number *data) { std::free(data); }};
 
-            allocated_size = new_alloc_size;
+                allocated_size = new_alloc_size;
+              }
+            else if (new_alloc_size == 0)
+              {
+                data.values.reset();
+                allocated_size = 0;
+              }
+            data.values_sm = {
+              ArrayView<const Number>(data.values.get(), new_alloc_size)};
          }
-        else if (new_alloc_size == 0)
+        else
          {
-            data.values.reset();
-            allocated_size = 0;
+#ifdef DEAL_II_WITH_MPI
+#  if DEAL_II_MPI_VERSION_GTE(3, 0)
+            allocated_size = new_alloc_size;
+
+            const unsigned int size_sm =
+              Utilities::MPI::n_mpi_processes(comm_shared);
+            const unsigned int rank_sm =
+              Utilities::MPI::this_mpi_process(comm_shared);
+
+            MPI_Win mpi_window;
+            Number *data_this;
+
+
+            std::vector<Number *> others(size_sm);
+
+            MPI_Info info;
+            MPI_Info_create(&info);
+
+            MPI_Info_set(info, "alloc_shared_noncontig", "true");
+
+            const std::size_t align_by = 64;
+
+            std::size_t s =
+              ((new_alloc_size * sizeof(Number) + align_by - 1) /
+               sizeof(Number)) *
+              sizeof(Number);
+
+            auto ierr = MPI_Win_allocate_shared(
+              s, sizeof(Number), info, comm_shared, &data_this, &mpi_window);
+            AssertThrowMPI(ierr);
+
+            for (unsigned int i = 0; i < size_sm; i++)
+              {
+                int        disp_unit;
+                MPI_Aint   ssize;
+                const auto ierr = MPI_Win_shared_query(
+                  mpi_window, i, &ssize, &disp_unit, &others[i]);
+                AssertThrowMPI(ierr);
+              }
+
+            Number *ptr_unaligned = others[rank_sm];
+            Number *ptr_aligned   = ptr_unaligned;
+
+            AssertThrow(std::align(align_by,
+                                   new_alloc_size * sizeof(Number),
+                                   reinterpret_cast<void *&>(ptr_aligned),
+                                   s) != nullptr,
+                        ExcNotImplemented());
+
+            unsigned int n_align_local = ptr_aligned - ptr_unaligned;
+            std::vector<unsigned int> n_align_sm(size_sm);
+
+            ierr = MPI_Allgather(&n_align_local,
+                                 1,
+                                 MPI_UNSIGNED,
+                                 n_align_sm.data(),
+                                 1,
+                                 MPI_UNSIGNED,
+                                 comm_shared);
+            AssertThrowMPI(ierr);
+
+            for (unsigned int i = 0; i < size_sm; i++)
+              others[i] += n_align_sm[i];
+
+            std::vector<unsigned int> new_alloc_sizes(size_sm);
+
+            ierr = MPI_Allgather(&new_alloc_size,
+                                 1,
+                                 MPI_UNSIGNED,
+                                 new_alloc_sizes.data(),
+                                 1,
+                                 MPI_UNSIGNED,
+                                 comm_shared);
+            AssertThrowMPI(ierr);
+
+            data.values_sm.resize(size_sm);
+            for (unsigned int i = 0; i < size_sm; i++)
+              data.values_sm[i] =
+                ArrayView<const Number>(others[i], new_alloc_sizes[i]);
+
+            data.values = {ptr_aligned, [mpi_window](Number *) mutable {
+                             // note: we are creating here a copy of the
+                             // window other approaches led to segmentation
+                             // faults
+                             const auto ierr = MPI_Win_free(&mpi_window);
+                             AssertThrowMPI(ierr);
+                           }};
+#  else
+            AssertThrow(false,
+                        ExcMessage(
+                          "Sorry, this feature requires MPI 3.0 support"));
+#  endif
+#else
+            Assert(false, ExcInternalError());
+#endif
          }
      }
 
@@ -230,8 +336,11 @@
      resize_val(const types::global_dof_index new_alloc_size,
                 types::global_dof_index &     allocated_size,
                 ::dealii::MemorySpace::
-                  MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+                  MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data,
+                const MPI_Comm &comm_sm)
      {
+        (void)comm_sm;
+
        static_assert(
          std::is_same<Number, float>::value ||
            std::is_same<Number, double>::value,
@@ -401,11 +510,15 @@
   template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size)
+  Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size,
+                                              const MPI_Comm &comm_sm)
   {
     internal::la_parallel_vector_templates_functions<
       Number,
-      MemorySpaceType>::resize_val(new_alloc_size, allocated_size, data);
+      MemorySpaceType>::resize_val(new_alloc_size,
+                                   allocated_size,
+                                   data,
+                                   comm_sm);
 
     thread_loop_partitioner =
       std::make_shared<::dealii::parallel::internal::TBBPartitioner>();
@@ -421,7 +534,7 @@
     clear_mpi_requests();
 
     // check whether we need to reallocate
-    resize_val(size);
+    resize_val(size, comm_sm);
 
     // delete previous content in import data
     import_data.values.reset();
@@ -444,12 +557,15 @@
   Vector<Number, MemorySpaceType>::reinit(
     const types::global_dof_index local_size,
     const types::global_dof_index ghost_size,
-    const MPI_Comm comm)
+    const MPI_Comm & comm,
+    const MPI_Comm & comm_sm)
   {
     clear_mpi_requests();
 
+    this->comm_sm = comm_sm;
+
     // check whether we need to reallocate
-    resize_val(local_size + ghost_size);
+    resize_val(local_size + ghost_size, comm_sm);
 
     // delete previous content in import data
     import_data.values.reset();
@@ -475,6 +591,8 @@
     clear_mpi_requests();
     Assert(v.partitioner.get() != nullptr, ExcNotInitialized());
 
+    this->comm_sm = v.comm_sm;
+
     // check whether the partitioners are
     // different (check only if the are allocated
     // differently, not if the actual data is
@@ -484,7 +602,7 @@
         partitioner = v.partitioner;
         const size_type new_allocated_size =
           partitioner->local_size() + partitioner->n_ghost_indices();
-        resize_val(new_allocated_size);
+        resize_val(new_allocated_size, this->comm_sm);
       }
 
     if (omit_zeroing_entries == false)
@@ -535,15 +653,18 @@
   template <typename Number, typename MemorySpaceType>
   void
   Vector<Number, MemorySpaceType>::reinit(
-    const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in)
+    const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in,
+    const MPI_Comm &                                          comm_sm)
   {
     clear_mpi_requests();
     partitioner = partitioner_in;
 
+    this->comm_sm = comm_sm;
+
     // set vector size and allocate memory
     const size_type new_allocated_size =
       partitioner->local_size() + partitioner->n_ghost_indices();
-    resize_val(new_allocated_size);
+    resize_val(new_allocated_size, comm_sm);
 
     // initialize to zero
     *this = Number();
@@ -565,6 +686,7 @@
   Vector<Number, MemorySpaceType>::Vector()
     : partitioner(std::make_shared<Utilities::MPI::Partitioner>())
     , allocated_size(0)
+    , comm_sm(MPI_COMM_SELF)
   {
     reinit(0);
   }
@@ -577,6 +699,7 @@
     : Subscriptor()
     , allocated_size(0)
     , vector_is_ghosted(false)
+    , comm_sm(MPI_COMM_SELF)
   {
     reinit(v, true);
 
@@ -599,6 +722,7 @@ namespace LinearAlgebra
                                          const MPI_Comm communicator)
     : allocated_size(0)
     , vector_is_ghosted(false)
+    , comm_sm(MPI_COMM_SELF)
   {
     reinit(local_range, ghost_indices, communicator);
   }
@@ -610,6 +734,7 @@
                                          const MPI_Comm communicator)
     : allocated_size(0)
     , vector_is_ghosted(false)
+    , comm_sm(MPI_COMM_SELF)
   {
     reinit(local_range, communicator);
   }
@@ -620,6 +745,7 @@
   Vector<Number, MemorySpaceType>::Vector(const size_type size)
     : allocated_size(0)
     , vector_is_ghosted(false)
+    , comm_sm(MPI_COMM_SELF)
   {
     reinit(size, false);
   }
@@ -631,6 +757,7 @@
     const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
     : allocated_size(0)
     , vector_is_ghosted(false)
+    , comm_sm(MPI_COMM_SELF)
   {
     reinit(partitioner);
   }
@@ -677,6 +804,8 @@
     // the same local range but different ghost layout
     bool must_update_ghost_values = c.vector_is_ghosted;
 
+    this->comm_sm = c.comm_sm;
+
     // check whether the two vectors use the same parallel partitioner. if
     // not, check if all local ranges are the same (that way, we can
     // exchange data between different parallel layouts). One variant which
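The resize_val() branch above is built on three MPI-3 calls: MPI_Comm_split_type to obtain the node-local communicator, MPI_Win_allocate_shared to allocate one window that holds every process' slice, and MPI_Win_shared_query to obtain the base pointers of the other slices (which the patch then aligns to 64 bytes and wraps in ArrayViews). The following self-contained program, which is not part of the patch, only illustrates that call sequence outside of deal.II:

    // Illustration of the MPI-3 shared-memory calls used in resize_val():
    // every process allocates its slice of a shared window and then queries
    // the base pointers of all other processes on the node.
    // Compile with an MPI 3.0 compiler wrapper, e.g. mpicxx example.cc.
    #include <mpi.h>

    #include <iostream>
    #include <vector>

    int
    main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      MPI_Comm comm_sm;
      MPI_Comm_split_type(
        MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm_sm);

      int rank_sm, size_sm;
      MPI_Comm_rank(comm_sm, &rank_sm);
      MPI_Comm_size(comm_sm, &size_sm);

      // Allocate 10 doubles per process in one shared window; with
      // "alloc_shared_noncontig" the slices need not be contiguous.
      MPI_Info info;
      MPI_Info_create(&info);
      MPI_Info_set(info, "alloc_shared_noncontig", "true");

      const MPI_Aint local_size = 10 * sizeof(double);
      double *       my_data;
      MPI_Win        win;
      MPI_Win_allocate_shared(
        local_size, sizeof(double), info, comm_sm, &my_data, &win);
      MPI_Info_free(&info);

      // Fill the local slice and synchronize before anyone reads it.
      for (int i = 0; i < 10; ++i)
        my_data[i] = rank_sm;
      MPI_Win_fence(0, win);

      // Query the base pointer of every process' slice: this is what the
      // patch stores (as ArrayViews) in data.values_sm.
      std::vector<double *> others(size_sm);
      for (int i = 0; i < size_sm; ++i)
        {
          MPI_Aint size;
          int      disp_unit;
          MPI_Win_shared_query(win, i, &size, &disp_unit, &others[i]);
        }

      if (rank_sm == 0 && size_sm > 1)
        std::cout << "first value of rank 1: " << others[1][0] << std::endl;

      MPI_Win_free(&win);
      MPI_Comm_free(&comm_sm);
      MPI_Finalize();
      return 0;
    }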