From: Timo Heister
Date: Wed, 6 Nov 2019 16:32:12 +0000 (-0500)
Subject: use global list of MPI tags for collective communication
X-Git-Tag: v9.2.0-rc1~879^2~5
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0a708f8b85fa2096dc351be3de2ea7e2b560d015;p=dealii.git

use global list of MPI tags for collective communication

introduce Utilities::MPI::internal::Tags with a list of unique MPI tags.

part of #8958
---

diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index fc03961682..2b2d432b61 100644
--- a/include/deal.II/base/mpi.h
+++ b/include/deal.II/base/mpi.h
@@ -19,6 +19,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -1114,10 +1115,6 @@ namespace Utilities
   class ConsensusAlgorithm_NBX : public ConsensusAlgorithm
   {
   public:
-    // Unique tags to be used during Isend and Irecv
-    static const unsigned int tag_request = 12;
-    static const unsigned int tag_delivery = 13;
-
     /**
      * Constructor.
      *
@@ -1256,10 +1253,6 @@ namespace Utilities
   class ConsensusAlgorithm_PEX : public ConsensusAlgorithm
   {
   public:
-    // Unique tags to be used during Isend and Irecv
-    static const unsigned int tag_request = 14;
-    static const unsigned int tag_delivery = 15;
-
     /**
      * Constructor.
      *
@@ -1508,6 +1501,9 @@ namespace Utilities
       static CollectiveMutex mutex;
       CollectiveMutex::ScopedLock lock(mutex, comm);
 
+      const int mpi_tag =
+        internal::Tags::compute_point_to_point_communication_pattern;
+
       // Sending buffers
       std::vector> buffers_to_send(send_to.size());
       std::vector buffer_send_requests(send_to.size());
@@ -1521,7 +1517,7 @@ namespace Utilities
                                  buffers_to_send[i].size(),
                                  MPI_CHAR,
                                  rank,
-                                 21,
+                                 mpi_tag,
                                  comm,
                                  &buffer_send_requests[i]);
           AssertThrowMPI(ierr);
@@ -1538,7 +1534,7 @@ namespace Utilities
         {
           // Probe what's going on. Take data from the first available sender
           MPI_Status status;
-          int ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
           AssertThrowMPI(ierr);
 
           // Length of the message
@@ -1551,8 +1547,13 @@ namespace Utilities
           const unsigned int rank = status.MPI_SOURCE;
 
           // Actually receive the message
-          ierr = MPI_Recv(
-            buffer.data(), len, MPI_CHAR, rank, 21, comm, MPI_STATUS_IGNORE);
+          ierr = MPI_Recv(buffer.data(),
+                          len,
+                          MPI_CHAR,
+                          status.MPI_SOURCE,
+                          status.MPI_TAG,
+                          comm,
+                          MPI_STATUS_IGNORE);
           AssertThrowMPI(ierr);
           Assert(received_objects.find(rank) == received_objects.end(),
                  ExcInternalError(
diff --git a/include/deal.II/base/mpi_compute_index_owner_internal.h b/include/deal.II/base/mpi_compute_index_owner_internal.h
index 3587b2b31e..34495b320a 100644
--- a/include/deal.II/base/mpi_compute_index_owner_internal.h
+++ b/include/deal.II/base/mpi_compute_index_owner_internal.h
@@ -41,12 +41,6 @@ namespace Utilities
        */
      struct Dictionary
      {
-       /**
-        * A tag attached to the MPI communication during the dictionary
-        * lookup
-        */
-       static const unsigned int tag_setup = 11;
-
       /**
        * The minimum grain size for the ranges.
        */
@@ -188,6 +182,9 @@ namespace Utilities
          static CollectiveMutex mutex;
          CollectiveMutex::ScopedLock lock(mutex, comm);
 
+          const int mpi_tag =
+            Utilities::MPI::internal::Tags::dictionary_reinit;
+
          n_dict_procs_in_owned_indices = buffers.size();
          std::vector request;
          request.reserve(n_dict_procs_in_owned_indices);
@@ -200,7 +197,7 @@ namespace Utilities
                            rank_pair.second.size() * 2,
                            DEAL_II_DOF_INDEX_MPI_TYPE,
                            rank_pair.first,
-                           tag_setup,
+                           mpi_tag,
                            comm,
                            &request.back());
          AssertThrowMPI(ierr);
@@ -211,7 +208,7 @@ namespace Utilities
            {
              // wait for an incoming message
              MPI_Status status;
-              auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_setup, comm, &status);
+              auto ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
              AssertThrowMPI(ierr);
 
              // retrieve size of incoming message
@@ -232,8 +229,8 @@ namespace Utilities
              ierr = MPI_Recv(buffer.data(),
                              number_amount,
                              DEAL_II_DOF_INDEX_MPI_TYPE,
-                              other_rank,
-                              tag_setup,
+                              status.MPI_SOURCE,
+                              status.MPI_TAG,
                              comm,
                              &status);
              AssertThrowMPI(ierr);
@@ -637,6 +634,9 @@ namespace Utilities
          static CollectiveMutex mutex;
          CollectiveMutex::ScopedLock lock(mutex, comm);
 
+          const int mpi_tag = Utilities::MPI::internal::Tags::
+            consensus_algorithm_payload_get_requesters;
+
          // reserve enough slots for the requests ahead; depending on
          // whether the owning rank is one of the requesters or not, we
          // might have one less requests to execute, so fill the requests
@@ -688,7 +688,7 @@ namespace Utilities
                            send_data[i].size(),
                            MPI_UNSIGNED,
                            dict.actually_owning_rank_list[i],
-                            1021,
+                            mpi_tag,
                            comm,
                            &send_requests.back());
              AssertThrowMPI(ierr);
@@ -702,7 +702,7 @@ namespace Utilities
              // wait for an incoming message
              MPI_Status status;
              unsigned int ierr =
-                MPI_Probe(MPI_ANY_SOURCE, 1021, comm, &status);
+                MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
              AssertThrowMPI(ierr);
 
              // retrieve size of incoming message
@@ -718,7 +718,7 @@ namespace Utilities
                              number_amount,
                              MPI_UNSIGNED,
                              status.MPI_SOURCE,
-                              1021,
+                              status.MPI_TAG,
                              comm,
                              &status);
              AssertThrowMPI(ierr);
diff --git a/include/deal.II/fe/fe_tools_extrapolate.templates.h b/include/deal.II/fe/fe_tools_extrapolate.templates.h
index 2d69b02f43..1fa5775abb 100644
--- a/include/deal.II/fe/fe_tools_extrapolate.templates.h
+++ b/include/deal.II/fe/fe_tools_extrapolate.templates.h
@@ -1152,6 +1152,10 @@ namespace FETools
      static Utilities::MPI::CollectiveMutex mutex;
      Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, communicator);
 
+      // We pick a new tag in each round. Wrap around after 10 rounds:
+      const int mpi_tag =
+        Utilities::MPI::internal::Tags::fe_tools_extrapolate + round % 10;
+
      // send data
      unsigned int idx = 0;
      for (typename std::vector::const_iterator it =
@@ -1166,7 +1170,7 @@ namespace FETools
                               buffer->size(),
                               MPI_BYTE,
                               it->receiver,
-                               round,
+                               mpi_tag,
                               communicator,
                               &requests[idx]);
          AssertThrowMPI(ierr);
@@ -1184,9 +1188,10 @@ namespace FETools
      for (unsigned int index = 0; index < n_senders; ++index)
        {
          MPI_Status status;
-          int len;
-          int ierr = MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, communicator, &status);
          AssertThrowMPI(ierr);
+
+          int len;
          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
          AssertThrowMPI(ierr);
          receive.resize(len);
diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h
index 9726c0b736..fbf76afc53 100644
--- a/include/deal.II/grid/grid_tools.h
+++ b/include/deal.II/grid/grid_tools.h
@@ -3962,6 +3962,9 @@ namespace GridTools
      Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
                                                       tria->get_communicator());
 
+      const int mpi_tag =
+        Utilities::MPI::internal::Tags::exchange_cell_data_to_ghosts;
+
      // 2. send our messages
      std::set ghost_owners = tria->ghost_owners();
      const unsigned int n_ghost_owners = ghost_owners.size();
@@ -3982,7 +3985,7 @@ namespace GridTools
                               sendbuffers[idx].size(),
                               MPI_BYTE,
                               *it,
-                               786,
+                               mpi_tag,
                               tria->get_communicator(),
                               &requests[idx]);
          AssertThrowMPI(ierr);
@@ -3993,10 +3996,11 @@ namespace GridTools
      for (unsigned int idx = 0; idx < n_ghost_owners; ++idx)
        {
          MPI_Status status;
-          int len;
          int ierr =
-            MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
+            MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
          AssertThrowMPI(ierr);
+
+          int len;
          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
          AssertThrowMPI(ierr);
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index c5c84b3310..2b787c80e6 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -241,6 +242,9 @@ namespace Utilities
      static CollectiveMutex mutex;
      CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 
+      const int mpi_tag =
+        internal::Tags::compute_point_to_point_communication_pattern;
+
      // Calculate the number of messages to send to each process
      std::vector dest_vector(n_procs);
      for (const auto &el : destinations)
@@ -264,7 +268,7 @@ namespace Utilities
                          1,
                          MPI_UNSIGNED,
                          el,
-                          32766,
+                          mpi_tag,
                          mpi_comm,
                          send_requests.data() + (&el - destinations.data()));
          AssertThrowMPI(ierr);
@@ -281,7 +285,7 @@ namespace Utilities
                          1,
                          MPI_UNSIGNED,
                          MPI_ANY_SOURCE,
-                          32766,
+                          mpi_tag,
                          mpi_comm,
                          MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
@@ -1040,6 +1044,12 @@ namespace Utilities
  ConsensusAlgorithm_NBX::process_requests()
  {
 #ifdef DEAL_II_WITH_MPI
+
+    const int tag_request =
+      Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_request;
+    const int tag_deliver =
+      Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_deliver;
+
    // check if there is a request pending
    MPI_Status status;
    int request_is_pending;
@@ -1093,7 +1103,7 @@ namespace Utilities
                         request_buffer.size() * sizeof(T2),
                         MPI_BYTE,
                         other_rank,
-                         tag_delivery,
+                         tag_deliver,
                         this->comm,
                         request_requests.back().get());
        AssertThrowMPI(ierr);
@@ -1112,6 +1122,11 @@ namespace Utilities
    targets = this->process.compute_targets();
    const auto n_targets = targets.size();
 
+    const int tag_request =
+      Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_request;
+    const int tag_deliver =
+      Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_deliver;
+
    // 2) allocate memory
    recv_buffers.resize(n_targets);
    recv_requests.resize(n_targets);
@@ -1146,7 +1161,7 @@ namespace Utilities
                         recv_buffer.size() * sizeof(T2),
                         MPI_BYTE,
                         rank,
-                         tag_delivery,
+                         tag_deliver,
                         this->comm,
                         &recv_requests[index]);
        AssertThrowMPI(ierr);
@@ -1302,6 +1317,11 @@ namespace Utilities
  ConsensusAlgorithm_PEX::process_requests(int index)
  {
 #ifdef DEAL_II_WITH_MPI
+    const int tag_request =
+      Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_request;
+    const int tag_deliver =
+      Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_deliver;
+
    MPI_Status status;
    MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
 
@@ -1336,7 +1356,7 @@ namespace Utilities
                       request_buffer.size() * sizeof(T2),
                       MPI_BYTE,
                       other_rank,
-                       tag_delivery,
+                       tag_deliver,
                       this->comm,
                       &requests_answers[index]);
    AssertThrowMPI(ierr);
@@ -1355,6 +1375,11 @@ namespace Utilities
    // 1) determine with which processes this process wants to communicate
    targets = this->process.compute_targets();
 
+    const int tag_request =
+      Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_request;
+    const int tag_deliver =
+      Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_deliver;
+
    // 2) determine who wants to communicate with this process
    const bool use_nbx = false;
    if (!use_nbx)
@@ -1409,7 +1434,7 @@ namespace Utilities
                         recv_buffer.size() * sizeof(T2),
                         MPI_BYTE,
                         rank,
-                         tag_delivery,
+                         tag_deliver,
                         this->comm,
                         &send_and_recv_buffers[i]);
        AssertThrowMPI(ierr);
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 4e0d27039a..6e357df483 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -4231,6 +4231,9 @@ namespace parallel
      Utilities::MPI::CollectiveMutex::ScopedLock lock(
        mutex, this->get_communicator());
 
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        triangulation_communicate_locally_moved_vertices;
+
      std::vector> sendbuffers(needs_to_get_cells.size());
      std::vector>::iterator buffer = sendbuffers.begin();
      std::vector requests(needs_to_get_cells.size());
@@ -4261,7 +4264,7 @@ namespace parallel
                               buffer->size(),
                               MPI_BYTE,
                               it->first,
-                               123,
+                               mpi_tag,
                               this->get_communicator(),
                               &requests[idx]);
          AssertThrowMPI(ierr);
@@ -4282,10 +4285,13 @@ namespace parallel
      for (unsigned int i = 0; i < n_senders; ++i)
        {
          MPI_Status status;
-          int len;
-          int ierr =
-            MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE,
+                               mpi_tag,
+                               this->get_communicator(),
+                               &status);
          AssertThrowMPI(ierr);
+
+          int len;
          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
          AssertThrowMPI(ierr);
          receive.resize(len);
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index 20b20a36eb..8320abbe96 100644
--- a/source/dofs/dof_handler_policy.cc
+++ b/source/dofs/dof_handler_policy.cc
@@ -4435,6 +4435,11 @@ namespace internal
          Utilities::MPI::CollectiveMutex::ScopedLock lock(
            mutex, tria.get_communicator());
 
+          const int mpi_tag = Utilities::MPI::internal::Tags::
+            dofhandler_communicate_mg_ghost_cells;
+          const int mpi_tag_reply = Utilities::MPI::internal::Tags::
+            dofhandler_communicate_mg_ghost_cells_reply;
+
          //* send our requests:
          std::vector requests(level_ghost_owners.size());
          {
@@ -4447,7 +4452,7 @@ namespace internal
                            it.second.size() * sizeof(it.second[0]),
                            MPI_BYTE,
                            it.first,
-                            10101,
+                            mpi_tag,
                            tria.get_communicator(),
                            &requests[idx]);
            AssertThrowMPI(ierr);
@@ -4465,16 +4470,18 @@ namespace internal
          for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
            {
              MPI_Status status;
-              int len;
              int ierr = MPI_Probe(MPI_ANY_SOURCE,
-                                   10101,
+                                   mpi_tag,
                                   tria.get_communicator(),
                                   &status);
              AssertThrowMPI(ierr);
+
+              int len;
              ierr = MPI_Get_count(&status, MPI_BYTE, &len);
              AssertThrowMPI(ierr);
              Assert(len % sizeof(quadrant_data_to_send[idx][0]) == 0,
                     ExcInternalError());
+
              const unsigned int n_cells =
                len / sizeof(quadrant_data_to_send[idx][0]);
              quadrant_data_to_send[idx].resize(n_cells);
@@ -4519,7 +4526,7 @@ namespace internal
                            send_dof_numbers_and_indices[idx].size(),
                            DEAL_II_DOF_INDEX_MPI_TYPE,
                            status.MPI_SOURCE,
-                            10102,
+                            mpi_tag_reply,
                            tria.get_communicator(),
                            &reply_requests[idx]);
            AssertThrowMPI(ierr);
@@ -4529,12 +4536,12 @@ namespace internal
          for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
            {
              MPI_Status status;
-              int len;
              int ierr = MPI_Probe(MPI_ANY_SOURCE,
-                                   10102,
+                                   mpi_tag_reply,
                                   tria.get_communicator(),
                                   &status);
              AssertThrowMPI(ierr);
+              int len;
              ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
              const QuadrantBufferType &quadrants =
                neighbor_cell_list[status.MPI_SOURCE];
diff --git a/source/lac/sparsity_tools.cc b/source/lac/sparsity_tools.cc
index a3a7584e4e..e0e1e8d35d 100644
--- a/source/lac/sparsity_tools.cc
+++ b/source/lac/sparsity_tools.cc
@@ -1180,6 +1180,9 @@ namespace SparsityTools
    static Utilities::MPI::CollectiveMutex mutex;
    Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 
+    const int mpi_tag = Utilities::MPI::internal::Tags::
+      sparsity_tools_distribute_sparsity_pattern;
+
    {
      unsigned int idx = 0;
      for (const auto &sparsity_line : send_data)
@@ -1189,7 +1192,7 @@ namespace SparsityTools
                         sparsity_line.second.size(),
                         DEAL_II_DOF_INDEX_MPI_TYPE,
                         sparsity_line.first,
-                         124,
+                         mpi_tag,
                         mpi_comm,
                         &requests[idx++]);
        AssertThrowMPI(ierr);
@@ -1202,12 +1205,13 @@ namespace SparsityTools
    for (unsigned int index = 0; index < num_receive; ++index)
      {
        MPI_Status status;
-        int len;
-        int ierr = MPI_Probe(MPI_ANY_SOURCE, 124, mpi_comm, &status);
+        int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, mpi_comm, &status);
        AssertThrowMPI(ierr);
+        int len;
        ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
        AssertThrowMPI(ierr);
+
        recv_buf.resize(len);
        ierr = MPI_Recv(recv_buf.data(),
                        len,
diff --git a/source/multigrid/mg_transfer_internal.cc b/source/multigrid/mg_transfer_internal.cc
index a11aa5b66c..2e5330aaf9 100644
--- a/source/multigrid/mg_transfer_internal.cc
+++ b/source/multigrid/mg_transfer_internal.cc
@@ -296,6 +296,9 @@ namespace internal
        Utilities::MPI::CollectiveMutex::ScopedLock lock(
          mutex, tria->get_communicator());
 
+        const int mpi_tag =
+          Utilities::MPI::internal::Tags::mg_transfer_fill_copy_indices;
+
        // * send
        std::vector requests;
        {
@@ -312,7 +315,7 @@ namespace internal
                            data.size() * sizeof(data[0]),
                            MPI_BYTE,
                            dest,
-                            71,
+                            mpi_tag,
                            tria->get_communicator(),
                            &*requests.rbegin());
            AssertThrowMPI(ierr);
@@ -323,7 +326,7 @@ namespace internal
                            0,
                            MPI_BYTE,
                            dest,
-                            71,
+                            mpi_tag,
                            tria->get_communicator(),
                            &*requests.rbegin());
            AssertThrowMPI(ierr);
@@ -339,12 +342,12 @@ namespace internal
             ++counter)
          {
            MPI_Status status;
-            int len;
            int ierr = MPI_Probe(MPI_ANY_SOURCE,
-                                 71,
+                                 mpi_tag,
                                 tria->get_communicator(),
                                 &status);
            AssertThrowMPI(ierr);
+            int len;
            ierr = MPI_Get_count(&status, MPI_BYTE, &len);
            AssertThrowMPI(ierr);
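
The enumerators referenced above live in the new header deal.II/base/mpi_tags.h, which is not among the hunks shown in this patch. As a rough sketch only -- the actual header may use a different underlying type, starting value, and commenting -- a central list of unique tags of the kind this commit introduces could look like the following, restricted to the enumerators that appear in the hunks above:

// Sketch of a central MPI tag list in the spirit of this commit; this is
// NOT the actual contents of deal.II/base/mpi_tags.h. The starting value
// (300) and the underlying type are assumptions.
#include <cstdint>

namespace Utilities
{
  namespace MPI
  {
    namespace internal
    {
      namespace Tags
      {
        // One enumerator per communication step so that no two steps can
        // accidentally reuse the same MPI tag.
        enum enumeration : std::uint16_t
        {
          // Utilities::MPI::compute_point_to_point_communication_pattern()
          // and Utilities::MPI::some_to_some()
          compute_point_to_point_communication_pattern = 300,

          // index-owner dictionary setup in
          // mpi_compute_index_owner_internal.h
          dictionary_reinit,
          consensus_algorithm_payload_get_requesters,

          // FETools::extrapolate() uses fe_tools_extrapolate + round % 10,
          // so the next ten values are reserved for it.
          fe_tools_extrapolate,

          // request and answer channels of the consensus algorithms
          consensus_algorithm_nbx_process_request = fe_tools_extrapolate + 10,
          consensus_algorithm_nbx_process_deliver,
          consensus_algorithm_pex_process_request,
          consensus_algorithm_pex_process_deliver,

          // GridTools::exchange_cell_data_to_ghosts()
          exchange_cell_data_to_ghosts,

          // parallel::distributed::Triangulation, communication of locally
          // moved vertices
          triangulation_communicate_locally_moved_vertices,

          // exchange of ghost cell DoF indices on multigrid levels in
          // dof_handler_policy.cc (request and reply)
          dofhandler_communicate_mg_ghost_cells,
          dofhandler_communicate_mg_ghost_cells_reply,

          // SparsityTools::distribute_sparsity_pattern()
          sparsity_tools_distribute_sparsity_pattern,

          // multigrid transfer setup in mg_transfer_internal.cc
          mg_transfer_fill_copy_indices,
        };
      } // namespace Tags
    }   // namespace internal
  }     // namespace MPI
} // namespace Utilities

Because every communication step draws its tag from one enumeration, each value is unique by construction, which is what allows the scattered hard-coded literals in the hunks above (11, 21, 71, 123, 124, 786, 1021, 10101, 10102, 32766) to be retired without risking tag collisions between overlapping point-to-point exchanges; FETools::extrapolate reserves a block of ten consecutive values because it cycles through a fresh tag in each round.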