namespace internal
{
/**
- * This enum holds all MPI tags used in collective MPI communications with
- * using MPI_ANY_SOURCE inside the deal.II library.
+ * This enum holds all MPI tags used in point-to-point MPI communications
+ * inside the deal.II library.
*
* We keep these tags in a central location so that they are unique within
- * the library. Otherwise, communication that receives packages with
- * MPI_ANY_SOURCE might pick up packets from a different algorithm.
+ * the library. Otherwise, a receive operation might pick up packets that
+ * were sent by a different algorithm. This is especially true if
+ * MPI_ANY_SOURCE is used.
+ *
+ * The list of MPI functions that use an MPI tag is:
+ * - MPI_Send, MPI_Recv, MPI_Sendrecv
+ * - MPI_Isend, MPI_Irecv
+ * - MPI_Probe, MPI_Iprobe
+ * - MPI_Comm_create_group, MPI_Intercomm_create,
+ * Utilities::MPI::create_group
*/
- struct Tags
+ namespace Tags
{
/**
* The enum with the tags.
/// ConsensusAlgorithm_PEX::process
consensus_algorithm_pex_process_deliver,
+ /// ConstructionData<dim,
+ /// spacedim>::create_construction_data_from_triangulation_in_groups()
+ fully_distributed_create,
+
+ /// TriangulationBase<dim, spacedim>::fill_level_ghost_owners()
+ triangulation_base_fill_level_ghost_owners,
+
+ /// GridTools::compute_local_to_global_vertex_index_map
+ grid_tools_compute_local_to_global_vertex_index_map,
+ /// GridTools::compute_local_to_global_vertex_index_map second tag
+ grid_tools_compute_local_to_global_vertex_index_map2,
+
+ /// ParticleHandler<dim, spacedim>::send_recv_particles
+ particle_handler_send_recv_particles_setup,
+ /// ParticleHandler<dim, spacedim>::send_recv_particles
+ particle_handler_send_recv_particles_send,
+
+ /// ScaLAPACKMatrix<NumberType>::copy_to
+ scalapack_copy_to,
+ /// ScaLAPACKMatrix<NumberType>::copy_to
+ scalapack_copy_to2,
+ /// ScaLAPACKMatrix<NumberType>::copy_from
+ scalapack_copy_from,
+
+ /// ProcessGrid::ProcessGrid
+ process_grid_constructor,
+
};
- };
- } // namespace internal
- } // namespace MPI
+ } // namespace Tags
+ } // namespace internal
+ } // namespace MPI
} // namespace Utilities
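
For illustration only, here is a minimal sketch of how one of these enumerators might be used to protect a receive that uses MPI_ANY_SOURCE: because matching is restricted to the dedicated tag, the receive cannot pick up messages sent by an unrelated algorithm on the same communicator. The function name, the `comm` argument, and the choice of enumerator are hypothetical, and the header paths are assumed; real library code would add its own enumerator to the list above.

#include <deal.II/base/exceptions.h> // AssertThrowMPI (assumed header path)
#include <deal.II/base/mpi_tags.h>   // the Tags enum above (assumed header path)

#include <mpi.h>

#include <vector>

// Hypothetical helper: rank 0 collects one unsigned int from every other
// rank. The messages may arrive in any order (MPI_ANY_SOURCE), but the
// dedicated tag guarantees that only messages belonging to this exchange
// are matched, even if other algorithms communicate on `comm` concurrently.
std::vector<unsigned int>
gather_on_root(const MPI_Comm comm, unsigned int my_value)
{
  // Reusing an existing enumerator purely for illustration:
  const int tag = dealii::Utilities::MPI::internal::Tags::
    consensus_algorithm_pex_process_deliver;

  int rank = 0, size = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  std::vector<unsigned int> values;
  if (rank != 0)
    {
      const int ierr =
        MPI_Send(&my_value, 1, MPI_UNSIGNED, /*dest=*/0, tag, comm);
      AssertThrowMPI(ierr);
    }
  else
    for (int i = 0; i < size - 1; ++i)
      {
        unsigned int value = 0;
        const int    ierr  = MPI_Recv(&value,
                                  1,
                                  MPI_UNSIGNED,
                                  MPI_ANY_SOURCE,
                                  tag,
                                  comm,
                                  MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);
        values.push_back(value);
      }
  return values;
}
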
// Note that on all the active MPI processes (except for the one with
// rank 0) the resulting MPI_Comm mpi_communicator_inactive_with_root
// will be MPI_COMM_NULL.
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::process_grid_constructor;
+
ierr = Utilities::MPI::create_group(mpi_communicator,
inactive_with_root_group,
- 55,
+ mpi_tag,
&mpi_communicator_inactive_with_root);
AssertThrowMPI(ierr);
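
A minimal sketch of the pattern above, assuming a communicator `comm` that spans all processes and reusing the ProcessGrid tag purely for illustration: build a group holding only rank 0, create a communicator for it under a dedicated tag, and treat the result as MPI_COMM_NULL on every process outside the group. Variable names are hypothetical; the deal.II headers providing AssertThrowMPI and the Tags enum are assumed to be included.

MPI_Group all_processes, root_only;
int ierr = MPI_Comm_group(comm, &all_processes);
AssertThrowMPI(ierr);

const int root_rank[1] = {0};
ierr = MPI_Group_incl(all_processes, 1, root_rank, &root_only);
AssertThrowMPI(ierr);

// The tag keeps this group construction from colliding with any other
// concurrent create_group() call on the same communicator.
const int mpi_tag = Utilities::MPI::internal::Tags::process_grid_constructor;

MPI_Comm root_communicator;
ierr = Utilities::MPI::create_group(comm, root_only, mpi_tag, &root_communicator);
AssertThrowMPI(ierr);

// Only rank 0 obtained a valid communicator; all other processes hold
// MPI_COMM_NULL and must not use it in any MPI call.
if (root_communicator != MPI_COMM_NULL)
  {
    // ... work restricted to the group members ...
    ierr = MPI_Comm_free(&root_communicator);
    AssertThrowMPI(ierr);
  }
ierr = MPI_Group_free(&root_only);
AssertThrowMPI(ierr);
ierr = MPI_Group_free(&all_processes);
AssertThrowMPI(ierr);
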
dealii::Utilities::MPI::this_mpi_process(comm);
const unsigned int group_root = (my_rank / group_size) * group_size;
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::fully_distributed_create;
+
// check if process is root of the group
if (my_rank == group_root)
{
dealii::Utilities::pack(construction_data, buffer, false);
// 3c) send ConstructionData
- const auto ierr = MPI_Send(
- buffer.data(), buffer.size(), MPI_CHAR, other_rank, 0, comm);
+ const auto ierr = MPI_Send(buffer.data(),
+ buffer.size(),
+ MPI_CHAR,
+ other_rank,
+ mpi_tag,
+ comm);
AssertThrowMPI(ierr);
}
// 3a) recv packed ConstructionData from group-root process
// (counter-part of 3c of root process)
MPI_Status status;
- auto ierr = MPI_Probe(group_root, 0, comm, &status);
+ auto ierr = MPI_Probe(group_root, mpi_tag, comm, &status);
AssertThrowMPI(ierr);
+
int len;
MPI_Get_count(&status, MPI_CHAR, &len);
+
std::vector<char> buf(len);
- ierr =
- MPI_Recv(buf.data(), len, MPI_CHAR, group_root, 0, comm, &status);
+ ierr = MPI_Recv(buf.data(),
+ len,
+ MPI_CHAR,
+ status.MPI_SOURCE,
+ status.MPI_TAG,
+ comm,
+ &status);
AssertThrowMPI(ierr);
// 3b) unpack ConstructionData (counter-part of 3b of root process)
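
The receiving side above cannot know the size of the packed ConstructionData in advance, so it probes first. A generic sketch of that probe-then-receive idiom, with hypothetical names and an assumed communicator `comm`; AssertThrowMPI and the deal.II headers are assumed to be available.

#include <mpi.h>

#include <vector>

// Receive a message of unknown length from `source` under `tag`: probe
// first to learn the size, allocate exactly that much, then receive. Using
// status.MPI_SOURCE and status.MPI_TAG in the receive guarantees that the
// message we probed is the one we pick up.
std::vector<char>
receive_packed_buffer(const MPI_Comm comm, const int source, const int tag)
{
  MPI_Status status;
  int        ierr = MPI_Probe(source, tag, comm, &status);
  AssertThrowMPI(ierr);

  int length = 0;
  ierr = MPI_Get_count(&status, MPI_CHAR, &length);
  AssertThrowMPI(ierr);

  std::vector<char> buffer(length);
  ierr = MPI_Recv(buffer.data(),
                  length,
                  MPI_CHAR,
                  status.MPI_SOURCE,
                  status.MPI_TAG,
                  comm,
                  MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  return buffer;
}

The returned buffer would then be handed to Utilities::unpack(), mirroring the Utilities::pack() call on the sending side.
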
int ierr = MPI_Barrier(this->mpi_communicator);
AssertThrowMPI(ierr);
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ triangulation_base_fill_level_ghost_owners;
+
// important: preallocate to avoid (re)allocation:
std::vector<MPI_Request> requests(
this->number_cache.level_ghost_owners.size());
1,
MPI_UNSIGNED,
*it,
- 9001,
+ mpi_tag,
this->mpi_communicator,
&requests[req_counter]);
AssertThrowMPI(ierr);
1,
MPI_UNSIGNED,
*it,
- 9001,
+ mpi_tag,
this->mpi_communicator,
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
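
A condensed sketch of the handshake above, with invented names: each process posts a non-blocking send of a single unsigned int to every level ghost owner under the dedicated tag, receives the matching messages, and finally waits for its own sends to complete. The std::set argument and the function name are hypothetical; the deal.II headers providing Tags and AssertThrowMPI are assumed.

#include <mpi.h>

#include <set>
#include <vector>

// Exchange one unsigned int with every process in `owners`, all under a
// single dedicated tag so these messages cannot be matched against traffic
// from other algorithms on the same communicator.
void
handshake_with_owners(const MPI_Comm comm, const std::set<unsigned int> &owners)
{
  const int mpi_tag = dealii::Utilities::MPI::internal::Tags::
    triangulation_base_fill_level_ghost_owners;

  // Preallocate one request per non-blocking send.
  std::vector<MPI_Request>  requests(owners.size());
  std::vector<unsigned int> send_values(owners.size(), 1);

  unsigned int index = 0;
  for (const unsigned int owner : owners)
    {
      const int ierr = MPI_Isend(&send_values[index],
                                 1,
                                 MPI_UNSIGNED,
                                 static_cast<int>(owner),
                                 mpi_tag,
                                 comm,
                                 &requests[index]);
      AssertThrowMPI(ierr);
      ++index;
    }

  for (const unsigned int owner : owners)
    {
      unsigned int received = 0;
      const int    ierr     = MPI_Recv(&received,
                                1,
                                MPI_UNSIGNED,
                                static_cast<int>(owner),
                                mpi_tag,
                                comm,
                                MPI_STATUS_IGNORE);
      AssertThrowMPI(ierr);
    }

  if (requests.size() > 0)
    {
      const int ierr = MPI_Waitall(requests.size(),
                                   requests.data(),
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
    }
}
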
for (; global_index_it != global_index_end; ++global_index_it)
global_index_it->second += shift;
+
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ grid_tools_compute_local_to_global_vertex_index_map;
+ const int mpi_tag2 = Utilities::MPI::internal::Tags::
+ grid_tools_compute_local_to_global_vertex_index_map2;
+
+
// In a first message, send the global ID of the vertices and the local
// positions in the cells. In a second message, send the cell ID as a
// resized string. This is done in two messages so that types are not mixed
buffer_size,
DEAL_II_VERTEX_INDEX_MPI_TYPE,
destination,
- 0,
+ mpi_tag,
triangulation.get_communicator(),
&first_requests[i]);
AssertThrowMPI(ierr);
buffer_size,
DEAL_II_VERTEX_INDEX_MPI_TYPE,
source,
- 0,
+ mpi_tag,
triangulation.get_communicator(),
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
buffer_size,
MPI_CHAR,
destination,
- 0,
+ mpi_tag2,
triangulation.get_communicator(),
&second_requests[i]);
AssertThrowMPI(ierr);
buffer_size,
MPI_CHAR,
source,
- 0,
+ mpi_tag2,
triangulation.get_communicator(),
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
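
A reduced sketch of this two-message scheme, with a hypothetical helper name and buffers: the same pair of processes exchanges two distinct payloads, and giving each payload its own tag guarantees that every receive matches the intended stream even though source, destination, and communicator are identical. The header paths are assumptions; DEAL_II_VERTEX_INDEX_MPI_TYPE, DEAL_II_MPI_CONST_CAST, and AssertThrowMPI are taken from the surrounding code.

#include <deal.II/base/types.h> // dealii::types::global_vertex_index (assumed path)

#include <mpi.h>

#include <vector>

// Hypothetical helper run symmetrically by both processes of a pair: send
// and receive the vertex indices and the serialized cell IDs as two
// separate streams, each with its own tag.
void
exchange_vertex_data(
  const MPI_Comm                                          comm,
  const int                                               partner,
  const std::vector<dealii::types::global_vertex_index> &send_indices,
  const std::vector<char> &                               send_cell_ids,
  std::vector<dealii::types::global_vertex_index> &       recv_indices,
  std::vector<char> &                                     recv_cell_ids)
{
  const int tag_indices = dealii::Utilities::MPI::internal::Tags::
    grid_tools_compute_local_to_global_vertex_index_map;
  const int tag_cell_ids = dealii::Utilities::MPI::internal::Tags::
    grid_tools_compute_local_to_global_vertex_index_map2;

  std::vector<MPI_Request> requests(2);

  // First stream: the global vertex indices.
  int ierr = MPI_Isend(DEAL_II_MPI_CONST_CAST(send_indices.data()),
                       send_indices.size(),
                       DEAL_II_VERTEX_INDEX_MPI_TYPE,
                       partner,
                       tag_indices,
                       comm,
                       &requests[0]);
  AssertThrowMPI(ierr);

  // Second stream: the cell IDs, serialized as plain characters.
  ierr = MPI_Isend(DEAL_II_MPI_CONST_CAST(send_cell_ids.data()),
                   send_cell_ids.size(),
                   MPI_CHAR,
                   partner,
                   tag_cell_ids,
                   comm,
                   &requests[1]);
  AssertThrowMPI(ierr);

  // Each blocking receive can only match the stream carrying its tag.
  ierr = MPI_Recv(recv_indices.data(),
                  recv_indices.size(),
                  DEAL_II_VERTEX_INDEX_MPI_TYPE,
                  partner,
                  tag_indices,
                  comm,
                  MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  ierr = MPI_Recv(recv_cell_ids.data(),
                  recv_cell_ids.size(),
                  MPI_CHAR,
                  partner,
                  tag_cell_ids,
                  comm,
                  MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  ierr = MPI_Waitall(2, requests.data(), MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);
}
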
MPI_Group group_B;
MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
MPI_Comm communicator_B;
+
+ const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
Utilities::MPI::create_group(this->grid->mpi_communicator,
group_B,
- 0,
+ mpi_tag,
&communicator_B);
int n_proc_rows_B = 1, n_proc_cols_B = 1;
int this_process_row_B = -1, this_process_column_B = -1;
MPI_Group group_B;
MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
MPI_Comm communicator_B;
+
+ const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
Utilities::MPI::create_group(this->grid->mpi_communicator,
group_B,
- 0,
+ mpi_tag,
&communicator_B);
int n_proc_rows_B = 1, n_proc_cols_B = 1;
int this_process_row_B = -1, this_process_column_B = -1;
// first argument, even if the program we are currently running
// and that is calling this function only works on a subset of
// processes. The same holds for the wrapper/fallback we are using here.
- ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
+
+ const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
+ ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
group_union,
- 5,
+ mpi_tag,
&mpi_communicator_union);
AssertThrowMPI(ierr);
// Notify other processors how many particles we will send
{
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ particle_handler_send_recv_particles_setup;
+
std::vector<MPI_Request> n_requests(2 * n_neighbors);
for (unsigned int i = 0; i < n_neighbors; ++i)
{
1,
MPI_UNSIGNED,
neighbors[i],
- 0,
+ mpi_tag,
triangulation->get_communicator(),
&(n_requests[2 * i]));
AssertThrowMPI(ierr);
1,
MPI_UNSIGNED,
neighbors[i],
- 0,
+ mpi_tag,
triangulation->get_communicator(),
&(n_requests[2 * i + 1]));
AssertThrowMPI(ierr);
unsigned int send_ops = 0;
unsigned int recv_ops = 0;
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ particle_handler_send_recv_particles_send;
+
for (unsigned int i = 0; i < n_neighbors; ++i)
if (n_recv_data[i] > 0)
{
n_recv_data[i],
MPI_CHAR,
neighbors[i],
- 1,
+ mpi_tag,
triangulation->get_communicator(),
&(requests[send_ops]));
AssertThrowMPI(ierr);
n_send_data[i],
MPI_CHAR,
neighbors[i],
- 1,
+ mpi_tag,
triangulation->get_communicator(),
&(requests[send_ops + recv_ops]));
AssertThrowMPI(ierr);
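
Taken together, the two particle-handler tags form a small two-phase protocol. The sketch below, with hypothetical names and an assumed communicator `comm`, first exchanges the number of bytes each neighbor will send under the setup tag and then exchanges the actual payloads under the send tag; each phase is completed with MPI_Waitall. The deal.II headers providing Tags, AssertThrowMPI, and DEAL_II_MPI_CONST_CAST are assumed.

#include <mpi.h>

#include <vector>

// Hypothetical two-phase exchange: `send_data[i]` is the byte stream for
// neighbor i. Phase 1 exchanges sizes, phase 2 exchanges the payloads.
std::vector<std::vector<char>>
exchange_with_neighbors(const MPI_Comm                        comm,
                        const std::vector<int> &              neighbors,
                        const std::vector<std::vector<char>> &send_data)
{
  const unsigned int n_neighbors = neighbors.size();

  const int setup_tag = dealii::Utilities::MPI::internal::Tags::
    particle_handler_send_recv_particles_setup;
  const int data_tag = dealii::Utilities::MPI::internal::Tags::
    particle_handler_send_recv_particles_send;

  // Phase 1: tell every neighbor how many bytes to expect and learn the
  // same about their messages.
  std::vector<unsigned int> n_send(n_neighbors), n_recv(n_neighbors);
  std::vector<MPI_Request>  size_requests(2 * n_neighbors);
  for (unsigned int i = 0; i < n_neighbors; ++i)
    {
      n_send[i] = send_data[i].size();
      int ierr  = MPI_Irecv(&n_recv[i], 1, MPI_UNSIGNED, neighbors[i],
                           setup_tag, comm, &size_requests[2 * i]);
      AssertThrowMPI(ierr);
      ierr = MPI_Isend(&n_send[i], 1, MPI_UNSIGNED, neighbors[i],
                       setup_tag, comm, &size_requests[2 * i + 1]);
      AssertThrowMPI(ierr);
    }
  int ierr = MPI_Waitall(2 * n_neighbors, size_requests.data(),
                         MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);

  // Phase 2: exchange the payloads under a different tag, so these messages
  // can never be matched against the size messages of phase 1.
  std::vector<std::vector<char>> recv_data(n_neighbors);
  std::vector<MPI_Request>       data_requests(2 * n_neighbors);
  for (unsigned int i = 0; i < n_neighbors; ++i)
    {
      recv_data[i].resize(n_recv[i]);
      ierr = MPI_Irecv(recv_data[i].data(), n_recv[i], MPI_CHAR, neighbors[i],
                       data_tag, comm, &data_requests[2 * i]);
      AssertThrowMPI(ierr);
      ierr = MPI_Isend(DEAL_II_MPI_CONST_CAST(send_data[i].data()),
                       n_send[i], MPI_CHAR, neighbors[i],
                       data_tag, comm, &data_requests[2 * i + 1]);
      AssertThrowMPI(ierr);
    }
  ierr = MPI_Waitall(2 * n_neighbors, data_requests.data(),
                     MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);

  return recv_data;
}

Because the two phases use distinct enumerators, a payload byte stream can never be mistaken for a size message, regardless of the order in which the non-blocking operations complete.
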