From: Wolfgang Bangerth
Date: Tue, 27 Aug 2024 16:45:42 +0000 (-0600)
Subject: Substantially simplify code by using MPI::some_to_some().
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F17618%2Fhead;p=dealii.git

Substantially simplify code by using MPI::some_to_some().
---
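For readers who have not used it before: Utilities::MPI::some_to_some() takes a map from destination rank to a serializable payload, carries out the point-to-point exchange, and returns the corresponding map of what arrived from each sender. That is precisely the Isend/Probe/Recv/Waitall bookkeeping the two hunks below no longer need to spell out. Here is a minimal, self-contained sketch of the call; it is not part of the patch, and the ring-neighbor pattern and the toy payload are made up purely for illustration:

#include <deal.II/base/mpi.h>

#include <iostream>
#include <map>
#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // Each rank prepares a payload for its right neighbor only. Any subset of
  // ranks (including none) may appear as keys; that is the "some" in
  // some_to_some().
  std::map<unsigned int, std::vector<double>> to_send;
  if (n_ranks > 1)
    to_send[(my_rank + 1) % n_ranks] = {1.0 * my_rank, 2.0 * my_rank};

  // One call replaces the hand-written send/probe/receive/wait cycle:
  const std::map<unsigned int, std::vector<double>> received =
    Utilities::MPI::some_to_some(comm, to_send);

  for (const auto &[sender, values] : received)
    std::cout << "Rank " << my_rank << " received " << values.size()
              << " values from rank " << sender << std::endl;
}

Because the returned map is keyed by the sending rank, no MPI tags, requests, or message probing appear in user code; the function also serializes and deserializes the payloads internally, so the explicit Utilities::pack()/unpack() calls go away as well.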
diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h
index bcf83cf3bb..2f311b9e56 100644
--- a/include/deal.II/lac/affine_constraints.templates.h
+++ b/include/deal.II/lac/affine_constraints.templates.h
@@ -473,11 +473,8 @@ namespace internal
         constrained_indices_process.get_requesters();

       {
-        const unsigned int tag = Utilities::MPI::internal::Tags::
-          affine_constraints_make_consistent_in_parallel_0;
-
         // ... collect data and sort according to owner
-        std::map<unsigned int, std::vector<ConstraintType>> send_data_temp;
+        std::map<unsigned int, std::vector<ConstraintType>> send_data;

         for (unsigned int i = 0; i < constrained_indices_owners.size(); ++i)
           {
@@ -498,87 +495,25 @@ namespace internal
             if (constrained_indices_owners[i] == my_rank)
               locally_relevant_constraints.push_back(entry);
             else
-              send_data_temp[constrained_indices_owners[i]].push_back(entry);
-          }
-
-        std::map<unsigned int, std::vector<char>> send_data;
-
-        for (const typename std::map<unsigned int, std::vector<ConstraintType>>::value_type &i :
-             send_data_temp)
-          send_data[i.first] = Utilities::pack(i.second, false);
-
-        std::vector<MPI_Request> requests;
-        requests.reserve(send_data.size());
-
-        // ... send data
-        for (const std::map<unsigned int, std::vector<char>>::value_type &i :
-             send_data)
-          {
-            if (i.first == my_rank)
-              continue;
-
-            requests.push_back({});
-
-            const int ierr = MPI_Isend(i.second.data(),
-                                       i.second.size(),
-                                       MPI_CHAR,
-                                       i.first,
-                                       tag,
-                                       mpi_communicator,
-                                       &requests.back());
-            AssertThrowMPI(ierr);
+              send_data[constrained_indices_owners[i]].push_back(entry);
           }

-        // ... receive data
-        unsigned int n_rec_ranks = 0;
-
-        for (const std::map<unsigned int, IndexSet>::value_type &i :
-             constrained_indices_by_ranks)
-          if (i.first != my_rank)
-            ++n_rec_ranks;
-
-        for (unsigned int i = 0; i < n_rec_ranks; ++i)
-          {
-            MPI_Status status;
-            int ierr = MPI_Probe(MPI_ANY_SOURCE, tag, mpi_communicator, &status);
-            AssertThrowMPI(ierr);
-
-            int message_length;
-            ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
-            AssertThrowMPI(ierr);
-
-            std::vector<char> buffer(message_length);
-
-            ierr = MPI_Recv(buffer.data(),
-                            buffer.size(),
-                            MPI_CHAR,
-                            status.MPI_SOURCE,
-                            tag,
-                            mpi_communicator,
-                            MPI_STATUS_IGNORE);
-            AssertThrowMPI(ierr);
-
-            const auto data =
-              Utilities::unpack<std::vector<ConstraintType>>(buffer, false);
-
-            for (const auto &i : data)
-              locally_relevant_constraints.push_back(i);
-          }
+        // Now exchange this data between processes:
+        const std::map<unsigned int, std::vector<ConstraintType>> received_data =
+          Utilities::MPI::some_to_some(mpi_communicator, send_data);

-        const int ierr =
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
-        AssertThrowMPI(ierr);
+        // Finally collate everything into one array, sort, and make it unique:
+        for (const auto &[rank, constraints] : received_data)
+          locally_relevant_constraints.insert(locally_relevant_constraints.end(),
+                                              constraints.begin(),
+                                              constraints.end());

         sort_and_make_unique(locally_relevant_constraints);
       }

-      // step 3: communicate constraints so that each process know how the
+      // step 3: communicate constraints so that each process knows how the
       // locally locally relevant dofs are constrained
       {
-        const unsigned int tag = Utilities::MPI::internal::Tags::
-          affine_constraints_make_consistent_in_parallel_1;
-
         // ... determine owners of locally relevant dofs
         IndexSet locally_relevant_dofs_non_local = locally_relevant_dofs;
         locally_relevant_dofs_non_local.subtract_set(locally_owned_dofs);
@@ -602,12 +537,8 @@ namespace internal
         const auto locally_relevant_dofs_by_ranks =
           locally_relevant_dofs_process.get_requesters();

-        std::map<unsigned int, std::vector<char>> send_data;
+        std::map<unsigned int, std::vector<ConstraintType>> send_data;

-        std::vector<MPI_Request> requests;
-        requests.reserve(send_data.size());
-
-        // ... send data
         for (const std::map<unsigned int, IndexSet>::value_type &rank_and_indices :
              locally_relevant_dofs_by_ranks)
           {
@@ -629,68 +560,17 @@ namespace internal
                 data.push_back(*ptr);
               }

-            send_data[rank_and_indices.first] = Utilities::pack(data, false);
-
-            requests.push_back({});
-
-            const int ierr = MPI_Isend(send_data[rank_and_indices.first].data(),
-                                       send_data[rank_and_indices.first].size(),
-                                       MPI_CHAR,
-                                       rank_and_indices.first,
-                                       tag,
-                                       mpi_communicator,
-                                       &requests.back());
-            AssertThrowMPI(ierr);
+            send_data[rank_and_indices.first] = std::move(data);
           }

-        // ... receive data
-        const unsigned int n_rec_ranks = [&]() {
-          // count number of ranks from where data will be received from
-          // by looping locally_relevant_dofs_owners and identifying unique
-          // rank (ignoring the current rank)
-
-          std::set<unsigned int> rec_ranks;
-
-          for (const unsigned int rank : locally_relevant_dofs_owners)
-            if (rank != my_rank)
-              rec_ranks.insert(rank);
-
-          return rec_ranks.size();
-        }();
-
-        for (unsigned int counter = 0; counter < n_rec_ranks; ++counter)
-          {
-            MPI_Status status;
-            int ierr = MPI_Probe(MPI_ANY_SOURCE, tag, mpi_communicator, &status);
-            AssertThrowMPI(ierr);
-
-            int message_length;
-            ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
-            AssertThrowMPI(ierr);
-
-            std::vector<char> buffer(message_length);
-
-            ierr = MPI_Recv(buffer.data(),
-                            buffer.size(),
-                            MPI_CHAR,
-                            status.MPI_SOURCE,
-                            tag,
-                            mpi_communicator,
-                            MPI_STATUS_IGNORE);
-            AssertThrowMPI(ierr);
-
-            const std::vector<ConstraintType> received_locally_relevant_constrain =
-              Utilities::unpack<std::vector<ConstraintType>>(buffer, false);
-
-            for (const auto &locally_relevant_constrain :
-                 received_locally_relevant_constrain)
-              locally_relevant_constraints.push_back(locally_relevant_constrain);
-          }
+        const std::map<unsigned int, std::vector<ConstraintType>> received_data =
+          Utilities::MPI::some_to_some(mpi_communicator, send_data);

-        const int ierr =
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
-        AssertThrowMPI(ierr);
+        // Finally collate everything into one array, sort, and make it unique:
+        for (const auto &[rank, constraints] : received_data)
+          locally_relevant_constraints.insert(locally_relevant_constraints.end(),
+                                              constraints.begin(),
+                                              constraints.end());

         sort_and_make_unique(locally_relevant_constraints);
       }
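Both blocks above now follow the same three-step shape: bucket locally produced entries by destination rank, exchange the buckets with some_to_some(), and splice everything that was received into one vector before sort_and_make_unique(). The following generic sketch shows that pattern in isolation; the exchange_and_collate() helper is hypothetical (it is not part of deal.II or of this patch), and plain std::sort/std::unique merely stand in for sort_and_make_unique(), which knows about constraint lines:

#include <deal.II/base/mpi.h>

#include <algorithm>
#include <map>
#include <vector>

// Hypothetical helper: exchange per-rank buckets of entries and return one
// flat, sorted, duplicate-free vector of everything this rank received.
template <typename T>
std::vector<T>
exchange_and_collate(const MPI_Comm                                comm,
                     const std::map<unsigned int, std::vector<T>> &send_data)
{
  // One call instead of the former Isend/Probe/Recv/Waitall sequence:
  const std::map<unsigned int, std::vector<T>> received_data =
    dealii::Utilities::MPI::some_to_some(comm, send_data);

  // Collate everything into one array ...
  std::vector<T> result;
  for (const auto &[rank, entries] : received_data)
    result.insert(result.end(), entries.begin(), entries.end());

  // ... then sort and make it unique.
  std::sort(result.begin(), result.end());
  result.erase(std::unique(result.begin(), result.end()), result.end());

  return result;
}

After MPI has been initialized, a call such as exchange_and_collate<unsigned int>(MPI_COMM_WORLD, buckets) would return the merged, deduplicated entries received on the calling rank, which is the shape both modified code paths now share.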