--- /dev/null
+Improved: The new function
+Utilities::MPI::compute_index_owner_and_requesters()
+not only computes index owners but also returns the requesters
+for locally owned indices.
+<br>
+(Peter Munch, 2024/10/15)
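A minimal usage sketch follows; the wrapper function, the IndexSet names, and the use of MPI_COMM_WORLD are illustrative assumptions, not part of the patch:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>

  using namespace dealii;

  void example(const IndexSet &owned, const IndexSet &ghosted)
  {
    // For every element of `ghosted`, determine the rank that owns it; in
    // addition, obtain for every rank that asked about indices owned by
    // this process which of those indices it asked about.
    const auto [owning_ranks, requesters] =
      Utilities::MPI::compute_index_owner_and_requesters(owned,
                                                         ghosted,
                                                         MPI_COMM_WORLD);

    // owning_ranks[i] is the owner of the i-th locally stored element of
    // `ghosted`; every IndexSet stored in `requesters` is a subset of
    // `owned`.
  }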
* owned indices, these indices will be treated correctly and the rank of
* this process is returned for those entries.
*
- * @note This is a @ref GlossCollectiveOperation "collective operation": all processes within the given
+ * @note This is a @ref GlossCollectiveOperation "collective operation":
+ * all processes within the given
* communicator have to call this function. Since this function does not
* use MPI_Alltoall or MPI_Allgather, but instead uses non-blocking
* point-to-point communication, and only a single non-blocking
const IndexSet &indices_to_look_up,
const MPI_Comm comm);
+ /**
+ * Just like the function above, this function computes the rank of the
+ * owning MPI process for each element of a second index set within the
+ * same partitioned index space. In addition, it returns a map from
+ * process ranks to the sets of indices that those processes have
+ * requested from the current rank. In other words, for each rank that
+ * has requested information about indices owned by the current process,
+ * this function returns which indices it has asked about; the values of
+ * the map are therefore all subsets of the owned set of indices.
+ */
+ std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
+ compute_index_owner_and_requesters(const IndexSet &owned_indices,
+ const IndexSet &indices_to_look_up,
+ const MPI_Comm &comm);
+
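As a sketch of how the requester map declared above might be consumed (continuing the illustration from the changelog entry; `reply_to` is a hypothetical helper, not a deal.II function):

  // Each map entry pairs a requesting rank with the subset of locally
  // owned indices that rank asked about.
  for (const auto &[requesting_rank, indices] : requesters)
    for (const types::global_dof_index index : indices)
      {
        Assert(owned.is_element(index), ExcInternalError());
        reply_to(requesting_rank, index); // hypothetical helper
      }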
/**
* Compute the union of the input vectors @p vec of all processes in the
* MPI communicator @p comm.
IndexSet locally_owned_dofs(total_sum);
locally_owned_dofs.add_range(prefix_sum, prefix_sum + local_size);
- Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
- process(locally_owned_dofs, locally_active_dofs, comm, dummy, true);
-
- Utilities::MPI::ConsensusAlgorithms::Selector<
- std::vector<
- std::pair<types::global_dof_index, types::global_dof_index>>,
- std::vector<unsigned int>>
- consensus_algorithm;
- consensus_algorithm.run(process, comm);
-
using T1 = std::vector<
std::pair<types::global_dof_index,
std::vector<std::pair<types::global_dof_index, Number>>>>;
- auto requesters = process.get_requesters();
+ std::map<unsigned int, IndexSet> requesters;
+ std::tie(std::ignore, requesters) =
+ Utilities::MPI::compute_index_owner_and_requesters(locally_owned_dofs,
+ locally_active_dofs,
+ comm);
std::vector<std::vector<std::pair<types::global_dof_index, Number>>>
locally_relevant_matrix_entries(locally_active_dofs.n_elements());
+ std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
+ compute_index_owner_and_requesters(const IndexSet &owned_indices,
+ const IndexSet &indices_to_look_up,
+ const MPI_Comm &comm)
+ {
+ Assert(owned_indices.size() == indices_to_look_up.size(),
+ ExcMessage("IndexSets have to have the same sizes."));
+
+ Assert(
+ owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
+ ExcMessage("IndexSets have to have the same size on all processes."));
+
+ std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
+
+ // Step 1: setup dictionary
+ // The input owned_indices can be partitioned arbitrarily. In the
+ // dictionary, the index set is statically repartitioned among the
+ // processes again and extended with information about the actual
+ // owner of each index.
+ internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
+ owned_indices, indices_to_look_up, comm, owning_ranks, true);
+
+ // Step 2: read dictionary
+ // Communicate with the process that owns the index in the static
+ // partition (i.e., in the dictionary). This process returns the actual
+ // owner of the index.
+ ConsensusAlgorithms::Selector<
+ std::vector<
+ std::pair<types::global_dof_index, types::global_dof_index>>,
+ std::vector<unsigned int>>
+ consensus_algorithm;
+ consensus_algorithm.run(process, comm);
+
+ return {owning_ranks, process.get_requesters()};
+ }
+
+
+
namespace internal
{
namespace CollectiveMutexImplementation