From: Wolfgang Bangerth
Date: Wed, 29 Dec 2021 01:51:47 +0000 (-0700)
Subject: Add a bit of documentation.
X-Git-Tag: v9.4.0-rc1~660^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e4d669b898fcb8e57dc7d09dfa63093ed7c63d27;p=dealii.git

Add a bit of documentation.
---

diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index da923fd1aa..63355aa8f5 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -386,16 +386,24 @@ namespace Utilities
         }
 
 #  if DEAL_II_MPI_VERSION_GTE(3, 0)
-      // check if destinations contain duplicates
-      auto destinations_unique = destinations;
-      std::sort(destinations_unique.begin(), destinations_unique.end());
-      destinations_unique.erase(std::unique(destinations_unique.begin(),
-                                            destinations_unique.end()),
-                                destinations_unique.end());
-      if (Utilities::MPI::min(destinations.size() ==
-                                destinations_unique.size(),
-                              mpi_comm))
+      // Have a little function that checks if destinations provided
+      // to the current process are unique:
+      const bool my_destinations_are_unique = [destinations]() {
+        std::vector<unsigned int> my_destinations = destinations;
+        const unsigned int        n_destinations  = my_destinations.size();
+        std::sort(my_destinations.begin(), my_destinations.end());
+        my_destinations.erase(std::unique(my_destinations.begin(),
+                                          my_destinations.end()),
+                              my_destinations.end());
+        return (my_destinations.size() == n_destinations);
+      }();
+
+      // If all processes report that they have unique destinations,
+      // then we can short-cut the process using a consensus algorithm (which
+      // is implemented only for the case of unique destinations):
+      if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
+          1)
         {
           ConsensusAlgorithms::AnonymousProcess<char, char> process(
             [&]() { return destinations; });
@@ -403,119 +411,130 @@ namespace Utilities
                                                                  mpi_comm);
           return consensus_algorithm.run();
         }
+      // If that was not the case, we need to use the remainder of the code
+      // below, i.e., just fall through the if condition above.
 #  endif
-      {
+
+
+      // So we need to run a different algorithm, specifically one that
+      // requires more memory -- MPI_Reduce_scatter_block will require memory
+      // proportional to the number of processes involved; that function is
+      // also only available for MPI 2.2 or later:
 #  if DEAL_II_MPI_VERSION_GTE(2, 2)
+      static CollectiveMutex      mutex;
+      CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 
-        static CollectiveMutex      mutex;
-        CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
+      const int mpi_tag =
+        internal::Tags::compute_point_to_point_communication_pattern;
 
-        const int mpi_tag =
-          internal::Tags::compute_point_to_point_communication_pattern;
+      // Calculate the number of messages to send to each process
+      std::vector<unsigned int> dest_vector(n_procs);
+      for (const auto &el : destinations)
+        ++dest_vector[el];
 
-        // Calculate the number of messages to send to each process
-        std::vector<unsigned int> dest_vector(n_procs);
-        for (const auto &el : destinations)
-          ++dest_vector[el];
+      // Find how many processes will send to this one
+      // by reducing with sum and then scattering the
+      // results over all processes
+      unsigned int n_recv_from;
+      const int    ierr = MPI_Reduce_scatter_block(
+        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
 
-        // Find how many processes will send to this one
-        // by reducing with sum and then scattering the
-        // results over all processes
-        unsigned int n_recv_from;
-        const int    ierr = MPI_Reduce_scatter_block(
-          dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
+      AssertThrowMPI(ierr);
 
-        AssertThrowMPI(ierr);
+      // Send myid to every process in `destinations` vector...
+      std::vector<MPI_Request> send_requests(destinations.size());
+      for (const auto &el : destinations)
+        {
+          const int ierr =
+            MPI_Isend(&myid,
+                      1,
+                      MPI_UNSIGNED,
+                      el,
+                      mpi_tag,
+                      mpi_comm,
+                      send_requests.data() + (&el - destinations.data()));
+          AssertThrowMPI(ierr);
+        }
 
-        // Send myid to every process in `destinations` vector...
-        std::vector<MPI_Request> send_requests(destinations.size());
-        for (const auto &el : destinations)
-          {
-            const int ierr =
-              MPI_Isend(&myid,
-                        1,
-                        MPI_UNSIGNED,
-                        el,
-                        mpi_tag,
-                        mpi_comm,
-                        send_requests.data() + (&el - destinations.data()));
-            AssertThrowMPI(ierr);
-          }
+      // Receive `n_recv_from` times from the processes
+      // who communicate with this one. Store the obtained id's
+      // in the resulting vector
+      std::vector<unsigned int> origins(n_recv_from);
+      for (auto &el : origins)
+        {
+          const int ierr = MPI_Recv(&el,
+                                    1,
+                                    MPI_UNSIGNED,
+                                    MPI_ANY_SOURCE,
+                                    mpi_tag,
+                                    mpi_comm,
+                                    MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
        }
 
-        // Receive `n_recv_from` times from the processes
-        // who communicate with this one. Store the obtained id's
-        // in the resulting vector
-        std::vector<unsigned int> origins(n_recv_from);
-        for (auto &el : origins)
-          {
-            const int ierr = MPI_Recv(&el,
-                                      1,
-                                      MPI_UNSIGNED,
-                                      MPI_ANY_SOURCE,
-                                      mpi_tag,
-                                      mpi_comm,
-                                      MPI_STATUS_IGNORE);
-            AssertThrowMPI(ierr);
-          }
+      if (destinations.size() > 0)
+        {
+          const int ierr = MPI_Waitall(destinations.size(),
+                                       send_requests.data(),
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
 
-        if (destinations.size() > 0)
-          {
-            const int ierr = MPI_Waitall(destinations.size(),
-                                         send_requests.data(),
-                                         MPI_STATUSES_IGNORE);
-            AssertThrowMPI(ierr);
-          }
+      return origins;
 
-        return origins;
 #  else
-        // let all processors communicate the maximal number of destinations
-        // they have
-        const unsigned int max_n_destinations =
-          Utilities::MPI::max(destinations.size(), mpi_comm);
-
-        if (max_n_destinations == 0)
-          // all processes have nothing to send/receive:
-          return std::vector<unsigned int>();
-
-        // now that we know the number of data packets every processor wants to
-        // send, set up a buffer with the maximal size and copy our destinations
-        // in there, padded with -1's
-        std::vector<unsigned int> my_destinations(
-          max_n_destinations, numbers::invalid_unsigned_int);
-        std::copy(destinations.begin(),
-                  destinations.end(),
-                  my_destinations.begin());
-
-        // now exchange these (we could communicate less data if we used
-        // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
-        // processors in this case, which is more expensive than the reduction
-        // operation above in MPI_Allreduce)
-        std::vector<unsigned int> all_destinations(max_n_destinations *
-                                                   n_procs);
-        const int ierr = MPI_Allgather(my_destinations.data(),
-                                       max_n_destinations,
-                                       MPI_UNSIGNED,
-                                       all_destinations.data(),
-                                       max_n_destinations,
-                                       MPI_UNSIGNED,
-                                       mpi_comm);
-        AssertThrowMPI(ierr);
-        // now we know who is going to communicate with whom. collect who is
-        // going to communicate with us!
-        std::vector<unsigned int> origins;
-        for (unsigned int i = 0; i < n_procs; ++i)
-          for (unsigned int j = 0; j < max_n_destinations; ++j)
-            if (all_destinations[i * max_n_destinations + j] == myid)
-              origins.push_back(i);
-            else if (all_destinations[i * max_n_destinations + j] ==
-                     numbers::invalid_unsigned_int)
-              break;
-
-        return origins;
+      // If we don't have MPI_Reduce_scatter_block available, fall back to
+      // a different algorithm that requires even more memory: the number
+      // of processes times the max over the number of destinations they
+      // each have:
+
+      // Start by letting all processors communicate the maximal number of
+      // destinations they have:
+      const unsigned int max_n_destinations =
+        Utilities::MPI::max(destinations.size(), mpi_comm);
+
+      if (max_n_destinations == 0)
+        // all processes have nothing to send/receive:
+        return std::vector<unsigned int>();
+
+      // now that we know the number of data packets every processor wants to
+      // send, set up a buffer with the maximal size and copy our destinations
+      // in there, padded with -1's
+      std::vector<unsigned int> my_destinations(max_n_destinations,
+                                                numbers::invalid_unsigned_int);
+      std::copy(destinations.begin(),
+                destinations.end(),
+                my_destinations.begin());
+
+      // now exchange these (we could communicate less data if we used
+      // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
+      // processors in this case, which is more expensive than the reduction
+      // operation above in MPI_Allreduce)
+      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
+      const int ierr = MPI_Allgather(my_destinations.data(),
+                                     max_n_destinations,
+                                     MPI_UNSIGNED,
+                                     all_destinations.data(),
+                                     max_n_destinations,
+                                     MPI_UNSIGNED,
+                                     mpi_comm);
+      AssertThrowMPI(ierr);
+
+      // now we know who is going to communicate with whom. collect who is
+      // going to communicate with us!
+      std::vector<unsigned int> origins;
+      for (unsigned int i = 0; i < n_procs; ++i)
+        for (unsigned int j = 0; j < max_n_destinations; ++j)
+          if (all_destinations[i * max_n_destinations + j] == myid)
+            origins.push_back(i);
+          else if (all_destinations[i * max_n_destinations + j] ==
+                   numbers::invalid_unsigned_int)
+            break;
+
+      return origins;
 #  endif
-      }
     }
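
The comments added by this commit describe three strategies used by
compute_point_to_point_communication_pattern(). As an illustration of the
first one -- every rank tests its own destination list for duplicates with
std::sort/std::unique, and a minimum-reduction of the resulting 0/1 flag
tells all ranks whether the consensus-algorithm short-cut is usable -- a
minimal standalone sketch in plain MPI could look as follows. The helper
name and its arguments are invented for illustration and are not deal.II
API.

// Sketch only: mirrors the sort/unique test and the MPI_MIN reduction
// described above, outside of deal.II.
#include <mpi.h>

#include <algorithm>
#include <cstddef>
#include <vector>

bool all_destination_lists_are_unique(std::vector<unsigned int> destinations,
                                      const MPI_Comm            comm)
{
  // Local test: sort, remove duplicates, compare lengths. The argument is
  // taken by value, so the caller's list is not modified.
  const std::size_t n_destinations = destinations.size();
  std::sort(destinations.begin(), destinations.end());
  destinations.erase(std::unique(destinations.begin(), destinations.end()),
                     destinations.end());
  const int my_flag = (destinations.size() == n_destinations) ? 1 : 0;

  // Global test: the minimum over all ranks is 1 only if every rank's
  // list was duplicate-free.
  int global_flag = 0;
  MPI_Allreduce(&my_flag, &global_flag, 1, MPI_INT, MPI_MIN, comm);
  return (global_flag == 1);
}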
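
For the MPI_Reduce_scatter_block branch, the new comments describe the idea
that every rank tallies how many messages it wants to send to each other
rank; summing these tallies element-wise across ranks and scattering one
entry back to each rank tells every rank how many messages it should expect
to receive. A self-contained sketch of that pattern, with an invented cyclic
`destinations` example, might look like this (again, not deal.II code):

// Sketch only: counts incoming messages with MPI_Reduce_scatter_block
// (available from MPI 2.2 on), as described in the comments above.
#include <mpi.h>

#include <iostream>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank, n_procs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &n_procs);

  // Example pattern: every rank sends one message to rank+1 (cyclically).
  const std::vector<unsigned int> destinations = {
    static_cast<unsigned int>((rank + 1) % n_procs)};

  // dest_vector[p] = number of messages this rank will send to rank p.
  std::vector<unsigned int> dest_vector(n_procs, 0);
  for (const unsigned int d : destinations)
    ++dest_vector[d];

  // Sum dest_vector over all ranks and scatter one entry back to each rank:
  // n_recv_from is then the number of messages this rank will receive.
  unsigned int n_recv_from = 0;
  MPI_Reduce_scatter_block(dest_vector.data(),
                           &n_recv_from,
                           1,
                           MPI_UNSIGNED,
                           MPI_SUM,
                           MPI_COMM_WORLD);

  std::cout << "Rank " << rank << " will receive " << n_recv_from
            << " message(s)." << std::endl;

  MPI_Finalize();
}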
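
The pre-MPI-2.2 fallback pads every rank's destination list to a common
length, gathers all padded lists on all ranks with MPI_Allgather, and scans
them for occurrences of the own rank number, at a memory cost proportional
to n_procs times the longest list. A sketch of that pattern under the same
assumptions, with `invalid` standing in for numbers::invalid_unsigned_int:

// Sketch only: the padded-Allgather fallback described above.
#include <mpi.h>

#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank, n_procs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &n_procs);

  const unsigned int invalid = std::numeric_limits<unsigned int>::max();

  // Example pattern: every rank sends one message to rank+1 (cyclically).
  const std::vector<unsigned int> destinations = {
    static_cast<unsigned int>((rank + 1) % n_procs)};

  // Everyone has to send the same number of entries, so take the maximum:
  const unsigned int my_n = destinations.size();
  unsigned int       max_n = 0;
  MPI_Allreduce(&my_n, &max_n, 1, MPI_UNSIGNED, MPI_MAX, MPI_COMM_WORLD);

  // Pad the local list to max_n entries with the invalid marker:
  std::vector<unsigned int> padded(max_n, invalid);
  std::copy(destinations.begin(), destinations.end(), padded.begin());

  // Gather everyone's padded list on every rank:
  std::vector<unsigned int> all(static_cast<std::size_t>(max_n) * n_procs);
  MPI_Allgather(padded.data(),
                static_cast<int>(max_n),
                MPI_UNSIGNED,
                all.data(),
                static_cast<int>(max_n),
                MPI_UNSIGNED,
                MPI_COMM_WORLD);

  // Rank i is an origin if its list mentions our own rank; the invalid
  // marker indicates the end of rank i's real entries.
  std::vector<unsigned int> origins;
  for (int i = 0; i < n_procs; ++i)
    for (unsigned int j = 0; j < max_n; ++j)
      if (all[i * max_n + j] == static_cast<unsigned int>(rank))
        origins.push_back(i);
      else if (all[i * max_n + j] == invalid)
        break;

  std::cout << "Rank " << rank << " will receive from " << origins.size()
            << " rank(s)." << std::endl;

  MPI_Finalize();
}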