From: Peter Munch
Date: Tue, 7 Apr 2020 08:47:05 +0000 (+0200)
Subject: Rename NoncontiguousPartitioner::update_values()
X-Git-Tag: v9.2.0-rc1~129^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F9847%2Fhead;p=dealii.git

Rename NoncontiguousPartitioner::update_values()
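The functions update_values(), update_values_start(), and
update_values_finish() become export_to_ghosted_array(),
export_to_ghosted_array_start(), and export_to_ghosted_array_finish(),
matching the naming of Utilities::MPI::Partitioner, and they now take
ArrayView arguments instead of a generic VectorType. Note that the argument
order changes as well: the locally owned (source) array comes first, whereas
update_values() took (dst, src).

A minimal usage sketch of the renamed interface (illustrative only, not part
of this patch; the index vectors and the scalar type double are made up here,
following the pattern of the updated tests below, and assume a single-rank
run):

    #include <deal.II/base/aligned_vector.h>
    #include <deal.II/base/array_view.h>
    #include <deal.II/base/mpi_noncontiguous_partitioner.h>

    using namespace dealii;

    void example(const MPI_Comm comm)
    {
      // Indices this rank owns and indices it wants to read (made up):
      const std::vector<types::global_dof_index> indices_has  = {0, 1, 2};
      const std::vector<types::global_dof_index> indices_want = {2, 0, 1};

      Utilities::MPI::NoncontiguousPartitioner partitioner(indices_has,
                                                           indices_want,
                                                           comm);

      AlignedVector<double> src(indices_has.size());
      AlignedVector<double> dst(indices_want.size());

      // before this patch: partitioner.update_values(dst, src);
      partitioner.export_to_ghosted_array(
        ArrayView<const double>(src.data(), src.size()),
        ArrayView<double>(dst.data(), dst.size()));
    }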
---

diff --git a/include/deal.II/base/mpi_noncontiguous_partitioner.h b/include/deal.II/base/mpi_noncontiguous_partitioner.h
index 45ca33b2f0..fe37f38699 100644
--- a/include/deal.II/base/mpi_noncontiguous_partitioner.h
+++ b/include/deal.II/base/mpi_noncontiguous_partitioner.h
@@ -38,7 +38,6 @@ namespace Utilities
      *
      * @author Peter Munch, 2020
      */
-    template <typename Number>
     class NoncontiguousPartitioner
       : public dealii::LinearAlgebra::CommunicationPatternBase
     {
@@ -51,16 +50,16 @@ namespace Utilities
       /**
        * Constructor. Set up point-to-point communication pattern based on the
-       * IndexSets arguments @p indexset_has and @p indexset_want for the MPI
+       * IndexSets arguments @p indexset_locally_owned and @p indexset_ghost for the MPI
        * communicator @p communicator.
        */
-      NoncontiguousPartitioner(const IndexSet &indexset_has,
-                               const IndexSet &indexset_want,
+      NoncontiguousPartitioner(const IndexSet &indexset_locally_owned,
+                               const IndexSet &indexset_ghost,
                                const MPI_Comm &communicator);

       /**
-       * Constructor. Same as above but for vectors of indices @p indices_has
-       * and @p indices_want. This allows the indices to not be sorted and the
+       * Constructor. Same as above but for vectors of indices @p indices_locally_owned
+       * and @p indices_ghost. This allows the indices to not be sorted and the
        * values are read and written automatically at the right position of
        * the vector during update_values(), update_values_start(), and
        * update_values_finish(). It is allowed to include entries with the
@@ -68,42 +67,95 @@
        * exchange but are present in the data vectors as padding.
        */
       NoncontiguousPartitioner(
-        const std::vector<types::global_dof_index> &indices_has,
-        const std::vector<types::global_dof_index> &indices_want,
+        const std::vector<types::global_dof_index> &indices_locally_owned,
+        const std::vector<types::global_dof_index> &indices_ghost,
         const MPI_Comm &                            communicator);

       /**
-       * Fill the vector @p dst according to the precomputed communication
-       * pattern with values from @p src.
+       * Fill the vector @p ghost_array according to the precomputed communication
+       * pattern with values from @p locally_owned_array.
        *
        * @pre The vectors only have to provide a method begin(), which allows
        *   access to their raw data.
        *
+       * @pre The size of both vectors must be at least as large as the number
+       *   of entries in the index sets passed to the constructors or the
+       *   reinit() functions.
+       *
        * @note This function calls the methods update_values_start() and
        *   update_values_finish() in sequence. Users can call these two
        *   functions separately and thereby overlap communication and
        *   computation.
        */
-      template <typename VectorType>
+      template <typename Number>
+      void
+      export_to_ghosted_array(
+        const ArrayView<const Number> &locally_owned_array,
+        const ArrayView<Number> &      ghost_array) const;
+
+      /**
+       * Same as above but with an interface similar to
+       * Utilities::MPI::Partitioner::export_to_ghosted_array_start and
+       * Utilities::MPI::Partitioner::export_to_ghosted_array_finish. In this
+       * function, the user can provide the temporary data structures to be
+       * used.
+       *
+       * @pre The size of the @p temporary_storage vector has to be at least
+       *   as large as the sum of the number of entries in the index sets
+       *   passed to the constructor and the reinit() functions. The reason
+       *   for this is that this vector is used as a buffer for both sending
+       *   and receiving data.
+       */
+      template <typename Number>
       void
-      update_values(VectorType &dst, const VectorType &src) const;
+      export_to_ghosted_array(
+        const unsigned int             communication_channel,
+        const ArrayView<const Number> &locally_owned_array,
+        const ArrayView<Number> &      temporary_storage,
+        const ArrayView<Number> &      ghost_array,
+        std::vector<MPI_Request> &     requests) const;

       /**
        * Start update: Data is packed, non-blocking sends and receives
        * are started.
+       *
+       * @note In contrast to the function
+       *   Utilities::MPI::Partitioner::export_to_ghosted_array_start, the user
+       *   does not pass a reference to the destination vector, since the data
+       *   is received into a designated part of the buffer @p temporary_storage. This
+       *   allows for padding and other post-processing of the received data.
+       *
+       * @pre The required sizes of the vectors are the same as in the functions
+       *   above.
        */
-      template <typename VectorType>
+      template <typename Number>
       void
-      update_values_start(const VectorType &src, const unsigned int tag) const;
+      export_to_ghosted_array_start(
+        const unsigned int             communication_channel,
+        const ArrayView<const Number> &locally_owned_array,
+        const ArrayView<Number> &      temporary_storage,
+        std::vector<MPI_Request> &     requests) const;

       /**
        * Finish update. The method waits until all data has been sent and
        * received. Once data from any process is received it is processed and
        * placed at the right position of the vector @p dst.
+       *
+       * @note In contrast to the function
+       *   Utilities::MPI::Partitioner::export_to_ghosted_array_finish, the user
+       *   also has to pass a reference to the buffer @p temporary_storage,
+       *   since the data has been received into the buffer and not into the
+       *   destination vector.
+       *
+       * @pre The required sizes of the vectors are the same as in the functions
+       *   above.
        */
-      template <typename VectorType>
+      template <typename Number>
       void
-      update_values_finish(VectorType &dst, const unsigned int tag) const;
+      export_to_ghosted_array_finish(
+        const ArrayView<const Number> &temporary_storage,
+        const ArrayView<Number> &      ghost_array,
+        std::vector<MPI_Request> &     requests) const;

       /**
        * Returns the number of processes this process sends data to and the
@@ -128,16 +180,16 @@ namespace Utilities
        * Initialize the inner data structures.
        */
       void
-      reinit(const IndexSet &indexset_has,
-             const IndexSet &indexset_want,
+      reinit(const IndexSet &indexset_locally_owned,
+             const IndexSet &indexset_ghost,
              const MPI_Comm &communicator) override;

       /**
        * Initialize the inner data structures.
        */
       void
-      reinit(const std::vector<types::global_dof_index> &indices_has,
-             const std::vector<types::global_dof_index> &indices_want,
+      reinit(const std::vector<types::global_dof_index> &indices_locally_owned,
+             const std::vector<types::global_dof_index> &indices_ghost,
              const MPI_Comm &                            communicator);

     private:
@@ -166,16 +218,6 @@ namespace Utilities
        */
       std::vector<types::global_dof_index> send_indices;

-      /**
-       * Buffer containing the values sorted according to the ranks.
-       */
-      mutable std::vector<Number> send_buffers;
-
-      /**
-       * MPI requests for sending.
-       */
-      mutable std::vector<MPI_Request> send_requests;
-
       /**
        * The ranks this process receives data from.
        */
@@ -197,14 +239,22 @@ namespace Utilities
       std::vector<types::global_dof_index> recv_indices;

       /**
-       * Buffer containing the values sorted by rank.
+       * Buffer containing the values sorted by rank for sending and receiving.
+       *
+       * @note Only allocated if not provided externally by the user.
+       *
+       * @note At this place we do not know the type of the data to be sent. So
+       *   we use an arbitrary type of size 1 byte. The type is cast to the
+       *   requested type in the relevant functions.
        */
-      mutable std::vector<Number> recv_buffers;
+      mutable std::vector<uint8_t> buffers;

       /**
-       * MPI requests for receiving.
+       * MPI requests for sending and receiving.
+       *
+       * @note Only allocated if not provided externally by the user.
        */
-      mutable std::vector<MPI_Request> recv_requests;
+      mutable std::vector<MPI_Request> requests;
     };

   } // namespace MPI
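The split functions declared above make it possible to overlap communication
and computation. A sketch of how they fit together (illustrative only, not
part of this patch; the name ghosted_update_overlapped is made up, the sizing
of temporary_storage follows the @pre notes above, and sizing requests as
sends plus receives follows the implementation in
mpi_noncontiguous_partitioner.templates.h below):

    #include <deal.II/base/array_view.h>
    #include <deal.II/base/mpi_noncontiguous_partitioner.h>

    using namespace dealii;

    void ghosted_update_overlapped(
      Utilities::MPI::NoncontiguousPartitioner &partitioner,
      const std::vector<double> &owned,   // one entry per locally-owned index
      std::vector<double> &      ghosts)  // one entry per ghost index
    {
      // Buffer for both directions of the exchange (see the @pre above).
      std::vector<double> temporary_storage(owned.size() + ghosts.size());

      // One request per send target plus one per receive source.
      const auto               n_targets = partitioner.n_targets();
      std::vector<MPI_Request> requests(n_targets.first + n_targets.second);

      partitioner.export_to_ghosted_array_start<double>(
        /*communication_channel=*/0, // must be < 10, see the implementation
        ArrayView<const double>(owned.data(), owned.size()),
        ArrayView<double>(temporary_storage.data(), temporary_storage.size()),
        requests);

      // ... perform local computations here while messages are in flight ...

      partitioner.export_to_ghosted_array_finish<double>(
        ArrayView<const double>(temporary_storage.data(),
                                temporary_storage.size()),
        ArrayView<double>(ghosts.data(), ghosts.size()),
        requests);
    }

Reusing temporary_storage and requests across repeated exchanges is the point
of this variant; the two-argument overload instead allocates them internally
on first use.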
diff --git a/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h b/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
index 8b4cff10ca..588baa5bed 100644
--- a/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
+++ b/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
@@ -33,8 +33,7 @@ namespace Utilities
 {
   namespace MPI
   {
-    template <typename Number>
-    NoncontiguousPartitioner<Number>::NoncontiguousPartitioner(
+    NoncontiguousPartitioner::NoncontiguousPartitioner(
       const IndexSet &indexset_has,
       const IndexSet &indexset_want,
       const MPI_Comm &communicator)
@@ -44,8 +43,7 @@

-    template <typename Number>
-    NoncontiguousPartitioner<Number>::NoncontiguousPartitioner(
+    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm &                            communicator)
@@ -55,47 +53,41 @@

-    template <typename Number>
     std::pair<unsigned int, unsigned int>
-    NoncontiguousPartitioner<Number>::n_targets()
+    NoncontiguousPartitioner::n_targets()
     {
       return {send_ranks.size(), recv_ranks.size()};
     }

-    template <typename Number>
     types::global_dof_index
-    NoncontiguousPartitioner<Number>::memory_consumption()
+    NoncontiguousPartitioner::memory_consumption()
     {
       return MemoryConsumption::memory_consumption(send_ranks) +
              MemoryConsumption::memory_consumption(send_ptr) +
              MemoryConsumption::memory_consumption(send_indices) +
-             MemoryConsumption::memory_consumption(send_buffers) +
-             MemoryConsumption::memory_consumption(send_requests) +
              MemoryConsumption::memory_consumption(recv_ranks) +
              MemoryConsumption::memory_consumption(recv_ptr) +
              MemoryConsumption::memory_consumption(recv_indices) +
-             MemoryConsumption::memory_consumption(recv_buffers) +
-             MemoryConsumption::memory_consumption(recv_requests);
+             MemoryConsumption::memory_consumption(buffers) +
+             MemoryConsumption::memory_consumption(requests);
     }

-    template <typename Number>
     const MPI_Comm &
-    NoncontiguousPartitioner<Number>::get_mpi_communicator() const
+    NoncontiguousPartitioner::get_mpi_communicator() const
     {
       return communicator;
     }

-    template <typename Number>
     void
-    NoncontiguousPartitioner<Number>::reinit(const IndexSet &indexset_has,
-                                             const IndexSet &indexset_want,
-                                             const MPI_Comm &communicator)
+    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
+                                     const IndexSet &indexset_want,
+                                     const MPI_Comm &communicator)
     {
       this->communicator = communicator;

@@ -103,13 +95,11 @@
       send_ranks.clear();
       send_ptr.clear();
       send_indices.clear();
-      send_buffers.clear();
-      send_requests.clear();
       recv_ranks.clear();
       recv_ptr.clear();
       recv_indices.clear();
-      recv_buffers.clear();
-      recv_requests.clear();
+      buffers.clear();
+      requests.clear();

       // setup communication pattern
       std::vector<unsigned int> owning_ranks_of_ghosts(
@@ -150,15 +140,12 @@

             recv_ptr.push_back(recv_indices.size());
           }
-
-        recv_buffers.resize(recv_indices.size());
-        recv_requests.resize(recv_map.size());
       }

       {
         const auto targets_with_indexset = process.get_requesters();

-        send_ptr.push_back(send_indices.size() /*=0*/);
+        send_ptr.push_back(recv_ptr.back());
         for (const auto &target_with_indexset : targets_with_indexset)
           {
             send_ranks.push_back(target_with_indexset.first);

             for (const auto &cell_index : target_with_indexset.second)
               send_indices.push_back(indexset_has.index_within_set(cell_index));

-            send_ptr.push_back(send_indices.size());
+            send_ptr.push_back(send_indices.size() + recv_ptr.back());
           }
-
-        send_buffers.resize(send_indices.size());
-        send_requests.resize(targets_with_indexset.size());
       }
     }

-    template <typename Number>
     void
-    NoncontiguousPartitioner<Number>::reinit(
+    NoncontiguousPartitioner::reinit(
       const std::vector<types::global_dof_index> &indices_has,
       const std::vector<types::global_dof_index> &indices_want,
       const MPI_Comm &                            communicator)
@@ -259,65 +242,104 @@
     template <typename Number>
-    template <typename VectorType>
     void
-    NoncontiguousPartitioner<Number>::update_values(VectorType &      dst,
-                                                    const VectorType &src) const
+    NoncontiguousPartitioner::export_to_ghosted_array(
+      const ArrayView<const Number> &src,
+      const ArrayView<Number> &      dst) const
     {
-      const auto tag = internal::Tags::noncontiguous_partitioner_update_values;
+      // allocate internal memory if needed
+      if (requests.size() != send_ranks.size() + recv_ranks.size())
+        requests.resize(send_ranks.size() + recv_ranks.size());
+
+      if (this->buffers.size() != send_ptr.back() * sizeof(Number))
+        this->buffers.resize(send_ptr.back() * sizeof(Number), 0);
+
+      // perform actual exchange
+      this->template export_to_ghosted_array<Number>(
+        0,
+        src,
+        ArrayView<Number>(reinterpret_cast<Number *>(this->buffers.data()),
+                          send_ptr.back()),
+        dst,
+        this->requests);
+    }
+
-      this->update_values_start(src, tag);
-      this->update_values_finish(dst, tag);
+    template <typename Number>
+    void
+    NoncontiguousPartitioner::export_to_ghosted_array(
+      const unsigned int             communication_channel,
+      const ArrayView<const Number> &locally_owned_array,
+      const ArrayView<Number> &      temporary_storage,
+      const ArrayView<Number> &      ghost_array,
+      std::vector<MPI_Request> &     requests) const
+    {
+      this->template export_to_ghosted_array_start<Number>(
+        communication_channel,
+        locally_owned_array,
+        temporary_storage,
+        requests);
+      this->template export_to_ghosted_array_finish<Number>(temporary_storage,
+                                                            ghost_array,
+                                                            requests);
     }

     template <typename Number>
-    template <typename VectorType>
     void
-    NoncontiguousPartitioner<Number>::update_values_start(
-      const VectorType & src,
-      const unsigned int tag) const
+    NoncontiguousPartitioner::export_to_ghosted_array_start(
+      const unsigned int             communication_channel,
+      const ArrayView<const Number> &src,
+      const ArrayView<Number> &      buffers,
+      std::vector<MPI_Request> &     requests) const
     {
 #ifndef DEAL_II_WITH_MPI
+      (void)communication_channel;
       (void)src;
-      (void)tag;
+      (void)buffers;
+      (void)requests;

       Assert(false, ExcNeedsMPI());
 #else
+      AssertIndexRange(communication_channel, 10);
+
+      const auto tag =
+        communication_channel +
+        internal::Tags::noncontiguous_partitioner_update_ghost_values;
+
       // post recv
       for (types::global_dof_index i = 0; i < recv_ranks.size(); i++)
         {
-          const auto ierr = MPI_Irecv(recv_buffers.data() + recv_ptr[i],
-                                      recv_ptr[i + 1] - recv_ptr[i],
-                                      Utilities::MPI::internal::mpi_type_id(
-                                        recv_buffers.data()),
-                                      recv_ranks[i],
-                                      tag,
-                                      communicator,
-                                      &recv_requests[i]);
+          const auto ierr =
+            MPI_Irecv(buffers.data() + recv_ptr[i],
+                      recv_ptr[i + 1] - recv_ptr[i],
+                      Utilities::MPI::internal::mpi_type_id(buffers.data()),
+                      recv_ranks[i],
+                      tag,
+                      communicator,
+                      &requests[i + send_ranks.size()]);
           AssertThrowMPI(ierr);
         }

       auto src_iterator = src.begin();

       // post send
-      for (types::global_dof_index i = 0; i < send_ranks.size(); i++)
+      for (types::global_dof_index i = 0, k = 0; i < send_ranks.size(); i++)
         {
           // collect data to be sent
-          for (types::global_dof_index j = send_ptr[i], c = 0;
-               j < send_ptr[i + 1];
+          for (types::global_dof_index j = send_ptr[i]; j < send_ptr[i + 1];
                j++)
-            send_buffers[send_ptr[i] + c++] = src_iterator[send_indices[j]];
+            buffers[j] = src_iterator[send_indices[k++]];

           // send data
-          const auto ierr = MPI_Isend(send_buffers.data() + send_ptr[i],
-                                      send_ptr[i + 1] - send_ptr[i],
-                                      Utilities::MPI::internal::mpi_type_id(
-                                        send_buffers.data()),
-                                      send_ranks[i],
-                                      tag,
-                                      communicator,
-                                      &send_requests[i]);
+          const auto ierr =
+            MPI_Isend(buffers.data() + send_ptr[i],
+                      send_ptr[i + 1] - send_ptr[i],
+                      Utilities::MPI::internal::mpi_type_id(buffers.data()),
+                      send_ranks[i],
+                      tag,
+                      communicator,
+                      &requests[i]);
           AssertThrowMPI(ierr);
         }
 #endif
     }

@@ -326,28 +348,27 @@
     template <typename Number>
-    template <typename VectorType>
     void
-    NoncontiguousPartitioner<Number>::update_values_finish(
-      VectorType &       dst,
-      const unsigned int tag) const
+    NoncontiguousPartitioner::export_to_ghosted_array_finish(
+      const ArrayView<const Number> &buffers,
+      const ArrayView<Number> &      dst,
+      std::vector<MPI_Request> &     requests) const
     {
-      (void)tag;
-
 #ifndef DEAL_II_WITH_MPI
+      (void)buffers;
       (void)dst;
+      (void)requests;

       Assert(false, ExcNeedsMPI());
 #else
       auto dst_iterator = dst.begin();

       // receive all data packages and copy data from buffers
-      for (types::global_dof_index proc = 0; proc < recv_requests.size();
-           proc++)
+      for (types::global_dof_index proc = 0; proc < recv_ranks.size(); proc++)
         {
           int        i;
           MPI_Status status;
-          const auto ierr = MPI_Waitany(recv_requests.size(),
-                                        recv_requests.data(),
+          const auto ierr = MPI_Waitany(recv_ranks.size(),
+                                        requests.data() + send_ranks.size(),
                                         &i,
                                         &status);
           AssertThrowMPI(ierr);

           for (types::global_dof_index j = recv_ptr[i], c = 0;
                j < recv_ptr[i + 1];
                j++)
-            dst_iterator[recv_indices[j]] = recv_buffers[recv_ptr[i] + c++];
+            dst_iterator[recv_indices[j]] = buffers[recv_ptr[i] + c++];
         }

       // wait until all data packages have been sent
-      const auto ierr = MPI_Waitall(send_requests.size(),
-                                    send_requests.data(),
-                                    MPI_STATUSES_IGNORE);
+      const auto ierr =
+        MPI_Waitall(send_ranks.size(), requests.data(), MPI_STATUSES_IGNORE);
       AssertThrowMPI(ierr);
 #endif
     }
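For orientation, the layout of the single exchange buffer that reinit() sets
up and the functions above rely on, as an annotation (not part of this patch;
inferred from the send_ptr/recv_ptr handling in this file):

    // buffers, viewed as Number after the reinterpret_cast above:
    //
    //   [ recv rank 0 | ... | recv rank n-1 | send rank 0 | ... | send rank m-1 ]
    //                                       ^                                   ^
    //                      recv_ptr.back() == send_ptr[0]           send_ptr.back()
    //
    // reinit() offsets every send_ptr entry by recv_ptr.back(), so the
    // receives posted in export_to_ghosted_array_start() (writing at
    // recv_ptr[i]) and the sends (reading at send_ptr[i]) share one
    // allocation without overlapping. Likewise, requests holds the send
    // requests first, followed by the receive requests, which is why
    // MPI_Waitany above skips the first send_ranks.size() entries.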
diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h
index 4c5d2f5333..3cc35e1d39 100644
--- a/include/deal.II/base/mpi_tags.h
+++ b/include/deal.II/base/mpi_tags.h
@@ -130,7 +130,7 @@ namespace Utilities
       partitioner_export_end = partitioner_export_start + 200,

       /// NoncontiguousPartitioner::update_values
-      noncontiguous_partitioner_update_values,
+      noncontiguous_partitioner_update_ghost_values,

       // Utilities::MPI::compute_union
       compute_union,

diff --git a/source/base/mpi_noncontiguous_partitioner.inst.in b/source/base/mpi_noncontiguous_partitioner.inst.in
index e7719c667c..08b73a779f 100644
--- a/source/base/mpi_noncontiguous_partitioner.inst.in
+++ b/source/base/mpi_noncontiguous_partitioner.inst.in
@@ -21,32 +21,10 @@ for (S : REAL_SCALARS)
   \{
     namespace MPI
     \{
-      template class NoncontiguousPartitioner<S>;
-
-      template void
-      NoncontiguousPartitioner<S>::update_values(
-        std::vector<S> &      dst,
-        const std::vector<S> &src) const;
-
-      template void
-      NoncontiguousPartitioner<S>::update_values(
-        AlignedVector<S> &      dst,
-        const AlignedVector<S> &src) const;
-
-      template void
-      NoncontiguousPartitioner<S>::update_values(
-        ArrayView<S> &      dst,
-        const ArrayView<S> &src) const;
-
-      template void
-      NoncontiguousPartitioner<S>::update_values(
-        LinearAlgebra::Vector<S> &      dst,
-        const LinearAlgebra::Vector<S> &src) const;
-
       template void
-      NoncontiguousPartitioner<S>::update_values(
-        LinearAlgebra::distributed::Vector<S> &      dst,
-        const LinearAlgebra::distributed::Vector<S> &src) const;
+      NoncontiguousPartitioner::export_to_ghosted_array(
+        const ArrayView<const S> &src,
+        const ArrayView<S> &      dst) const;
     \}
   \}
 }

diff --git a/tests/base/mpi_noncontiguous_partitioner_01.cc b/tests/base/mpi_noncontiguous_partitioner_01.cc
index 9cefef7c52..5e2a2edd76 100644
--- a/tests/base/mpi_noncontiguous_partitioner_01.cc
+++ b/tests/base/mpi_noncontiguous_partitioner_01.cc
@@ -40,16 +40,18 @@ test(const MPI_Comm comm)
       index_set_want.add_index(2);
     }

-  Utilities::MPI::NoncontiguousPartitioner<double> vector(index_set_has,
-                                                          index_set_want,
-                                                          comm);
+  Utilities::MPI::NoncontiguousPartitioner vector(index_set_has,
+                                                  index_set_want,
+                                                  comm);

   AlignedVector<double> src(index_set_has.n_elements());
   AlignedVector<double> dst(index_set_want.n_elements());

   src[0] = Utilities::MPI::this_mpi_process(comm) * 100 + 1;

-  vector.update_values(dst, src);
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));

   for (size_t i = 0; i < src.size(); i++)
     deallog << static_cast<int>(src[i]) << " ";

diff --git a/tests/base/mpi_noncontiguous_partitioner_02.cc b/tests/base/mpi_noncontiguous_partitioner_02.cc
index d419bd6974..11e9912dfa 100644
--- a/tests/base/mpi_noncontiguous_partitioner_02.cc
+++ b/tests/base/mpi_noncontiguous_partitioner_02.cc
@@ -114,9 +114,9 @@ test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)
   if (do_revert)
     std::reverse(indices_want.begin(), indices_want.end());

-  Utilities::MPI::NoncontiguousPartitioner<double> vector(indices_has,
-                                                          indices_want,
-                                                          comm);
+  Utilities::MPI::NoncontiguousPartitioner vector(indices_has,
+                                                  indices_want,
+                                                  comm);

   AlignedVector<double> src(indices_has.size());
   for (unsigned int i = 0; i < indices_has.size(); i++)
@@ -125,7 +125,9 @@ test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)

   AlignedVector<double> dst(indices_want.size());

-  vector.update_values(dst, src);
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));

   for (size_t i = 0; i < src.size(); i++)
     deallog << static_cast<int>(src[i]) << " ";

diff --git a/tests/base/mpi_noncontiguous_partitioner_03.cc b/tests/base/mpi_noncontiguous_partitioner_03.cc
index 3e0932d0ec..bededdf795 100644
--- a/tests/base/mpi_noncontiguous_partitioner_03.cc
+++ b/tests/base/mpi_noncontiguous_partitioner_03.cc
@@ -27,7 +27,7 @@ test(const MPI_Comm comm,
      std::vector<types::global_dof_index> index_set_has,
      std::vector<types::global_dof_index> index_set_want)
 {
-  Utilities::MPI::NoncontiguousPartitioner<double> vector;
+  Utilities::MPI::NoncontiguousPartitioner vector;
   vector.reinit(index_set_has, index_set_want, comm);

   AlignedVector<double> src(index_set_has.size(), 0);
@@ -36,7 +36,9 @@ test(const MPI_Comm comm,
   for (unsigned int i = 0; i < index_set_has.size(); i++)
     src[i] = Utilities::MPI::this_mpi_process(comm) * 100 + i;

-  vector.update_values(dst, src);
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));

   for (size_t i = 0; i < src.size(); i++)
     deallog << static_cast<int>(src[i]) << " ";