From 314a8c0aaa79c0ee61d41aaeab2b42b4f7a05cd6 Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Tue, 15 Oct 2024 10:35:28 +0200
Subject: [PATCH] Introduce Utilities::MPI::compute_index_owner_and_requesters()

---
 doc/news/changes/minor/20241015Munch-2    |  6 ++++
 include/deal.II/base/mpi.h                | 18 ++++++++++-
 include/deal.II/lac/sparse_matrix_tools.h | 16 +++-------
 source/base/mpi.cc                        | 38 +++++++++++++++++++++++
 4 files changed, 66 insertions(+), 12 deletions(-)
 create mode 100644 doc/news/changes/minor/20241015Munch-2

diff --git a/doc/news/changes/minor/20241015Munch-2 b/doc/news/changes/minor/20241015Munch-2
new file mode 100644
index 0000000000..aa89bccac6
--- /dev/null
+++ b/doc/news/changes/minor/20241015Munch-2
@@ -0,0 +1,6 @@
+Improved: The new function
+Utilities::MPI::compute_index_owner_and_requesters()
+not only computes index owners but also returns the requesters
+for locally owned indices.
+
+(Peter Munch, 2024/10/15)

diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index 85415eb641..20bca94947 100644
--- a/include/deal.II/base/mpi.h
+++ b/include/deal.II/base/mpi.h
@@ -1387,7 +1387,8 @@ namespace Utilities
      * owned indices, these indices will be treated correctly and the rank of
      * this process is returned for those entries.
      *
-     * @note This is a @ref GlossCollectiveOperation "collective operation": all processes within the given
+     * @note This is a @ref GlossCollectiveOperation "collective operation":
+     * all processes within the given
      * communicator have to call this function. Since this function does not
      * use MPI_Alltoall or MPI_Allgather, but instead uses non-blocking
      * point-to-point communication instead, and only a single non-blocking
@@ -1409,6 +1410,21 @@ namespace Utilities
                         const IndexSet &indices_to_look_up,
                         const MPI_Comm  comm);
 
+    /**
+     * Just like the function above, this function computes the owning MPI
+     * process rank of each element of a second index set according to the
+     * partitioned index set. In addition, it returns a map from process
+     * ranks to the sets of indices that those ranks have requested from
+     * the current process. In other words, for each rank that has asked
+     * about indices owned by the current process, this function returns
+     * which indices it has asked about; the values of the map are
+     * therefore all subsets of the owned set of indices.
+     */
+    std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
+    compute_index_owner_and_requesters(const IndexSet &owned_indices,
+                                       const IndexSet &indices_to_look_up,
+                                       const MPI_Comm &comm);
+
     /**
      * Compute the union of the input vectors @p vec of all processes in the
      * MPI communicator @p comm.
diff --git a/include/deal.II/lac/sparse_matrix_tools.h b/include/deal.II/lac/sparse_matrix_tools.h
index 182d01bfc9..157b3a16ae 100644
--- a/include/deal.II/lac/sparse_matrix_tools.h
+++ b/include/deal.II/lac/sparse_matrix_tools.h
@@ -215,21 +215,15 @@ namespace SparseMatrixTools
       IndexSet locally_owned_dofs(total_sum);
       locally_owned_dofs.add_range(prefix_sum, prefix_sum + local_size);
 
-      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
-        process(locally_owned_dofs, locally_active_dofs, comm, dummy, true);
-
-      Utilities::MPI::ConsensusAlgorithms::Selector<
-        std::vector<
-          std::pair<types::global_dof_index, types::global_dof_index>>,
-        std::vector<unsigned int>>
-        consensus_algorithm;
-      consensus_algorithm.run(process, comm);
-
       using T1 = std::vector<
         std::pair<types::global_dof_index,
                   std::vector<std::pair<types::global_dof_index, Number>>>>;
 
-      auto requesters = process.get_requesters();
+      std::map<unsigned int, IndexSet> requesters;
+      std::tie(std::ignore, requesters) =
+        Utilities::MPI::compute_index_owner_and_requesters(locally_owned_dofs,
+                                                           locally_active_dofs,
+                                                           comm);
 
       std::vector<std::vector<std::pair<types::global_dof_index, Number>>>
         locally_relevant_matrix_entries(locally_active_dofs.n_elements());
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index ed3162f95f..9aa0c4c6d4 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -735,6 +735,44 @@ namespace Utilities
 
 
 
+    std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
+    compute_index_owner_and_requesters(const IndexSet &owned_indices,
+                                       const IndexSet &indices_to_look_up,
+                                       const MPI_Comm &comm)
+    {
+      Assert(owned_indices.size() == indices_to_look_up.size(),
+             ExcMessage("IndexSets have to have the same sizes."));
+
+      Assert(
+        owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
+        ExcMessage("IndexSets have to have the same size on all processes."));
+
+      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
+
+      // Step 1: setup dictionary
+      // The input owned_indices can be partitioned arbitrarily. In the
+      // dictionary, the index set is statically repartitioned among the
+      // processes again and extended with information about the actual
+      // owner of that index.
+      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
+        owned_indices, indices_to_look_up, comm, owning_ranks, true);
+
+      // Step 2: read dictionary
+      // Communicate with the process that owns the index in the static
+      // partition (i.e., in the dictionary). This process returns the
+      // actual owner of the index.
+      ConsensusAlgorithms::Selector<
+        std::vector<
+          std::pair<types::global_dof_index, types::global_dof_index>>,
+        std::vector<unsigned int>>
+        consensus_algorithm;
+      consensus_algorithm.run(process, comm);
+
+      return {owning_ranks, process.get_requesters()};
+    }
+
+
+
   namespace internal
   {
     namespace CollectiveMutexImplementation
-- 
2.39.5
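
A minimal usage sketch of the new function, not part of the patch itself: the partitioning (a contiguous block of 10 indices per rank) and the choice of requested ghost index are purely illustrative assumptions, and only calls documented in the deal.II API above are used.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <iostream>
#include <map>
#include <utility>
#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
  const MPI_Comm comm = MPI_COMM_WORLD;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // Each rank owns a contiguous block of 10 indices ...
  const types::global_dof_index size = 10 * n_ranks;
  IndexSet owned(size);
  owned.add_range(10 * my_rank, 10 * (my_rank + 1));

  // ... and asks about the first index of the next rank's block.
  IndexSet ghosts(size);
  ghosts.add_index((10 * (my_rank + 1)) % size);

  // The new function returns both the owners of the requested indices and,
  // for every rank that asked about indices owned here, the subset it
  // asked about.
  const auto [owners, requesters] =
    Utilities::MPI::compute_index_owner_and_requesters(owned, ghosts, comm);

  // owners[i] is the rank owning the i-th element of 'ghosts'; requesters
  // maps a requesting rank to the locally owned indices it needs.
  for (const auto &[rank, indices] : requesters)
    std::cout << "rank " << rank << " requested " << indices.n_elements()
              << " locally owned indices" << std::endl;
}

Compared to the previous pattern in sparse_matrix_tools.h, the caller no longer needs to set up the ComputeIndexOwner payload and consensus algorithm by hand; a single call yields both the owner ranks and the requester map.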