Introduce Utilities::MPI::internal::Tags with a central list of unique MPI tags, and replace the hard-coded tag numbers scattered across the library with these named constants.
Part of #8958.
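
The new header itself does not appear in the hunks below. For reference, here is a minimal sketch of what it could contain, with the enumerator names taken from the call sites in this patch; the header guard, the base value 300, the enumerator order, and the fe_tools_extrapolate_end sentinel are illustrative assumptions, not the committed file:

#ifndef dealii_mpi_tags_h
#define dealii_mpi_tags_h

#include <deal.II/base/config.h>

#include <cstdint>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    namespace internal
    {
      /**
       * This namespace collects the tags of all point-to-point MPI
       * communication inside the library in one central list. This
       * guarantees that the tags are unique, so that a probe or
       * receive in one algorithm cannot match messages belonging to
       * another algorithm on the same communicator.
       */
      namespace Tags
      {
        enum enumeration : std::uint16_t
        {
          /// Utilities::MPI::compute_point_to_point_communication_pattern(),
          /// also used by Utilities::MPI::some_to_some()
          compute_point_to_point_communication_pattern = 300,

          /// GridTools::exchange_cell_data_to_ghosts()
          exchange_cell_data_to_ghosts,

          /// Triangulation::communicate_locally_moved_vertices()
          triangulation_communicate_locally_moved_vertices,

          /// One tag for the request stream and one for the reply
          /// stream of communicate_mg_ghost_cells()
          dofhandler_communicate_mg_ghost_cells,
          dofhandler_communicate_mg_ghost_cells_reply,

          /// SparsityTools::distribute_sparsity_pattern()
          sparsity_tools_distribute_sparsity_pattern,

          /// Dictionary::reinit()
          dictionary_reinit,

          /// ConsensusAlgorithmPayload::get_requesters()
          consensus_algorithm_payload_get_requesters,

          /// FETools::extrapolate() uses a different tag in each of its
          /// (up to 10) rounds, so reserve the following 10 values
          fe_tools_extrapolate,
          fe_tools_extrapolate_end = fe_tools_extrapolate + 10,

          /// ConsensusAlgorithm_NBX: request and answer messages
          consensus_algorithm_nbx_process_request,
          consensus_algorithm_nbx_process_deliver,

          /// ConsensusAlgorithm_PEX: request and answer messages
          consensus_algorithm_pex_process_request,
          consensus_algorithm_pex_process_deliver,

          /// fill_copy_indices() in mg_transfer_internal.cc
          mg_transfer_fill_copy_indices,
        };
      } // namespace Tags
    }   // namespace internal
  }     // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE

#endif

Because consecutive enumerators receive consecutive values, uniqueness holds by construction, and an MPI_Probe(MPI_ANY_SOURCE, tag, ...) can no longer pick up a message that a different algorithm sent with the same tag, which was possible with the previously hard-coded values (11-15, 21, 71, 123, 124, 786, 1021, 10101, 10102, 32766).
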
#include <deal.II/base/config.h>
#include <deal.II/base/array_view.h>
+#include <deal.II/base/mpi_tags.h>
#include <deal.II/base/numbers.h>
#include <map>
class ConsensusAlgorithm_NBX : public ConsensusAlgorithm<T1, T2>
{
public:
- // Unique tags to be used during Isend and Irecv
- static const unsigned int tag_request = 12;
- static const unsigned int tag_delivery = 13;
-
/**
* Constructor.
*
class ConsensusAlgorithm_PEX : public ConsensusAlgorithm<T1, T2>
{
public:
- // Unique tags to be used during Isend and Irecv
- static const unsigned int tag_request = 14;
- static const unsigned int tag_delivery = 15;
-
/**
* Constructor.
*
static CollectiveMutex mutex;
CollectiveMutex::ScopedLock lock(mutex, comm);
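+ // Take the tag for this exchange from the central list of unique
+ // tags, so that the MPI_Probe below cannot match messages that some
+ // other algorithm sends on the same communicator: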
+ const int mpi_tag =
+ internal::Tags::compute_point_to_point_communication_pattern;
+
// Sending buffers
std::vector<std::vector<char>> buffers_to_send(send_to.size());
std::vector<MPI_Request> buffer_send_requests(send_to.size());
buffers_to_send[i].size(),
MPI_CHAR,
rank,
- 21,
+ mpi_tag,
comm,
&buffer_send_requests[i]);
AssertThrowMPI(ierr);
{
// Probe what's going on. Take data from the first available sender
MPI_Status status;
- int ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
AssertThrowMPI(ierr);
// Length of the message
const unsigned int rank = status.MPI_SOURCE;
// Actually receive the message
- ierr = MPI_Recv(
- buffer.data(), len, MPI_CHAR, rank, 21, comm, MPI_STATUS_IGNORE);
+ ierr = MPI_Recv(buffer.data(),
+ len,
+ MPI_CHAR,
+ status.MPI_SOURCE,
+ status.MPI_TAG,
+ comm,
+ MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
Assert(received_objects.find(rank) == received_objects.end(),
ExcInternalError(
*/
struct Dictionary
{
- /**
- * A tag attached to the MPI communication during the dictionary
- * lookup
- */
- static const unsigned int tag_setup = 11;
-
/**
* The minimum grain size for the ranges.
*/
static CollectiveMutex mutex;
CollectiveMutex::ScopedLock lock(mutex, comm);
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::dictionary_reinit;
+
n_dict_procs_in_owned_indices = buffers.size();
std::vector<MPI_Request> request;
request.reserve(n_dict_procs_in_owned_indices);
rank_pair.second.size() * 2,
DEAL_II_DOF_INDEX_MPI_TYPE,
rank_pair.first,
- tag_setup,
+ mpi_tag,
comm,
&request.back());
AssertThrowMPI(ierr);
{
// wait for an incoming message
MPI_Status status;
- auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_setup, comm, &status);
+ auto ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
AssertThrowMPI(ierr);
// retrieve size of incoming message
ierr = MPI_Recv(buffer.data(),
number_amount,
DEAL_II_DOF_INDEX_MPI_TYPE,
- other_rank,
- tag_setup,
+ status.MPI_SOURCE,
+ status.MPI_TAG,
comm,
&status);
AssertThrowMPI(ierr);
static CollectiveMutex mutex;
CollectiveMutex::ScopedLock lock(mutex, comm);
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ consensus_algorithm_payload_get_requesters;
+
// reserve enough slots for the requests ahead; depending on
// whether the owning rank is one of the requesters or not, we
// might have one less requests to execute, so fill the requests
send_data[i].size(),
MPI_UNSIGNED,
dict.actually_owning_rank_list[i],
- 1021,
+ mpi_tag,
comm,
&send_requests.back());
AssertThrowMPI(ierr);
// wait for an incoming message
MPI_Status status;
unsigned int ierr =
- MPI_Probe(MPI_ANY_SOURCE, 1021, comm, &status);
+ MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
AssertThrowMPI(ierr);
// retrieve size of incoming message
number_amount,
MPI_UNSIGNED,
status.MPI_SOURCE,
- 1021,
+ status.MPI_TAG,
comm,
&status);
AssertThrowMPI(ierr);
static Utilities::MPI::CollectiveMutex mutex;
Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, communicator);
+ // We pick a different tag in each round so that messages from
+ // consecutive rounds cannot get mixed up. Wrap around after 10 rounds:
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::fe_tools_extrapolate + round % 10;
+
// send data
unsigned int idx = 0;
for (typename std::vector<CellData>::const_iterator it =
buffer->size(),
MPI_BYTE,
it->receiver,
- round,
+ mpi_tag,
communicator,
&requests[idx]);
AssertThrowMPI(ierr);
for (unsigned int index = 0; index < n_senders; ++index)
{
MPI_Status status;
- int len;
- int ierr = MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, communicator, &status);
AssertThrowMPI(ierr);
+
+ int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);
AssertThrowMPI(ierr);
receive.resize(len);
Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
tria->get_communicator());
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::exchange_cell_data_to_ghosts;
+
// 2. send our messages
std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
const unsigned int n_ghost_owners = ghost_owners.size();
sendbuffers[idx].size(),
MPI_BYTE,
*it,
- 786,
+ mpi_tag,
tria->get_communicator(),
&requests[idx]);
AssertThrowMPI(ierr);
for (unsigned int idx = 0; idx < n_ghost_owners; ++idx)
{
MPI_Status status;
- int len;
int ierr =
- MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
+ MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
AssertThrowMPI(ierr);
+
+ int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);
AssertThrowMPI(ierr);
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>
+#include <deal.II/base/mpi_tags.h>
#include <deal.II/base/multithread_info.h>
#include <deal.II/base/utilities.h>
static CollectiveMutex mutex;
CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
+ const int mpi_tag =
+ internal::Tags::compute_point_to_point_communication_pattern;
+
// Calculate the number of messages to send to each process
std::vector<unsigned int> dest_vector(n_procs);
for (const auto &el : destinations)
1,
MPI_UNSIGNED,
el,
- 32766,
+ mpi_tag,
mpi_comm,
send_requests.data() + (&el - destinations.data()));
AssertThrowMPI(ierr);
1,
MPI_UNSIGNED,
MPI_ANY_SOURCE,
- 32766,
+ mpi_tag,
mpi_comm,
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
ConsensusAlgorithm_NBX<T1, T2>::process_requests()
{
#ifdef DEAL_II_WITH_MPI
+
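+ // This algorithm uses a pair of tags: one for the initial requests
+ // and one for delivering the answers: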
+ const int tag_request =
+ Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_request;
+ const int tag_deliver =
+ Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_deliver;
+
// check if there is a request pending
MPI_Status status;
int request_is_pending;
request_buffer.size() * sizeof(T2),
MPI_BYTE,
other_rank,
- tag_delivery,
+ tag_deliver,
this->comm,
request_requests.back().get());
AssertThrowMPI(ierr);
targets = this->process.compute_targets();
const auto n_targets = targets.size();
+ const int tag_request =
+ Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_request;
+ const int tag_deliver =
+ Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_deliver;
+
// 2) allocate memory
recv_buffers.resize(n_targets);
recv_requests.resize(n_targets);
recv_buffer.size() * sizeof(T2),
MPI_BYTE,
rank,
- tag_delivery,
+ tag_deliver,
this->comm,
&recv_requests[index]);
AssertThrowMPI(ierr);
ConsensusAlgorithm_PEX<T1, T2>::process_requests(int index)
{
#ifdef DEAL_II_WITH_MPI
+ const int tag_request =
+ Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_request;
+ const int tag_deliver =
+ Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_deliver;
+
MPI_Status status;
MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
request_buffer.size() * sizeof(T2),
MPI_BYTE,
other_rank,
- tag_delivery,
+ tag_deliver,
this->comm,
&requests_answers[index]);
AssertThrowMPI(ierr);
// 1) determine with which processes this process wants to communicate
targets = this->process.compute_targets();
+ const int tag_request =
+ Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_request;
+ const int tag_deliver =
+ Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_deliver;
+
// 2) determine who wants to communicate with this process
const bool use_nbx = false;
if (!use_nbx)
recv_buffer.size() * sizeof(T2),
MPI_BYTE,
rank,
- tag_delivery,
+ tag_deliver,
this->comm,
&send_and_recv_buffers[i]);
AssertThrowMPI(ierr);
Utilities::MPI::CollectiveMutex::ScopedLock lock(
mutex, this->get_communicator());
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ triangulation_communicate_locally_moved_vertices;
+
std::vector<std::vector<char>> sendbuffers(needs_to_get_cells.size());
std::vector<std::vector<char>>::iterator buffer = sendbuffers.begin();
std::vector<MPI_Request> requests(needs_to_get_cells.size());
buffer->size(),
MPI_BYTE,
it->first,
- 123,
+ mpi_tag,
this->get_communicator(),
&requests[idx]);
AssertThrowMPI(ierr);
for (unsigned int i = 0; i < n_senders; ++i)
{
MPI_Status status;
- int len;
- int ierr =
- MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE,
+ mpi_tag,
+ this->get_communicator(),
+ &status);
AssertThrowMPI(ierr);
+
+ int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);
AssertThrowMPI(ierr);
receive.resize(len);
Utilities::MPI::CollectiveMutex::ScopedLock lock(
mutex, tria.get_communicator());
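+ // Two tags: one for the quadrant requests we send out, and one for
+ // the replies carrying the DoF indices: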
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ dofhandler_communicate_mg_ghost_cells;
+ const int mpi_tag_reply = Utilities::MPI::internal::Tags::
+ dofhandler_communicate_mg_ghost_cells_reply;
+
//* send our requests:
std::vector<MPI_Request> requests(level_ghost_owners.size());
{
it.second.size() * sizeof(it.second[0]),
MPI_BYTE,
it.first,
- 10101,
+ mpi_tag,
tria.get_communicator(),
&requests[idx]);
AssertThrowMPI(ierr);
for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
{
MPI_Status status;
- int len;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
- 10101,
+ mpi_tag,
tria.get_communicator(),
&status);
AssertThrowMPI(ierr);
+
+ int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);
AssertThrowMPI(ierr);
Assert(len % sizeof(quadrant_data_to_send[idx][0]) == 0,
ExcInternalError());
+
const unsigned int n_cells =
len / sizeof(quadrant_data_to_send[idx][0]);
quadrant_data_to_send[idx].resize(n_cells);
send_dof_numbers_and_indices[idx].size(),
DEAL_II_DOF_INDEX_MPI_TYPE,
status.MPI_SOURCE,
- 10102,
+ mpi_tag_reply,
tria.get_communicator(),
&reply_requests[idx]);
AssertThrowMPI(ierr);
for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
{
MPI_Status status;
- int len;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
- 10102,
+ mpi_tag_reply,
tria.get_communicator(),
&status);
AssertThrowMPI(ierr);
+ int len;
ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
const QuadrantBufferType &quadrants =
neighbor_cell_list[status.MPI_SOURCE];
static Utilities::MPI::CollectiveMutex mutex;
Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ sparsity_tools_distribute_sparsity_pattern;
+
{
unsigned int idx = 0;
for (const auto &sparsity_line : send_data)
sparsity_line.second.size(),
DEAL_II_DOF_INDEX_MPI_TYPE,
sparsity_line.first,
- 124,
+ mpi_tag,
mpi_comm,
&requests[idx++]);
AssertThrowMPI(ierr);
for (unsigned int index = 0; index < num_receive; ++index)
{
MPI_Status status;
- int len;
- int ierr = MPI_Probe(MPI_ANY_SOURCE, 124, mpi_comm, &status);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, mpi_comm, &status);
AssertThrowMPI(ierr);
+ int len;
ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
AssertThrowMPI(ierr);
+
recv_buf.resize(len);
ierr = MPI_Recv(recv_buf.data(),
len,
Utilities::MPI::CollectiveMutex::ScopedLock lock(
mutex, tria->get_communicator());
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::mg_transfer_fill_copy_indices;
+
// * send
std::vector<MPI_Request> requests;
{
data.size() * sizeof(data[0]),
MPI_BYTE,
dest,
- 71,
+ mpi_tag,
tria->get_communicator(),
&*requests.rbegin());
AssertThrowMPI(ierr);
0,
MPI_BYTE,
dest,
- 71,
+ mpi_tag,
tria->get_communicator(),
&*requests.rbegin());
AssertThrowMPI(ierr);
++counter)
{
MPI_Status status;
- int len;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
- 71,
+ mpi_tag,
tria->get_communicator(),
&status);
AssertThrowMPI(ierr);
+ int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);
AssertThrowMPI(ierr);