From cccfa47cfbcfb02306156a64269360bd6fc26f7f Mon Sep 17 00:00:00 2001
From: Timo Heister <timo.heister@gmail.com>
Date: Wed, 6 Nov 2019 14:58:18 -0500
Subject: [PATCH] add many other MPI tags

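Replace the hard-coded MPI tags (0, 1, 5, 55, 9001) used in
point-to-point communication and in the Utilities::MPI::create_group()
calls throughout the library with named values from the new
Utilities::MPI::internal::Tags namespace, so that every algorithm
communicates with a tag that is unique within the library.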
---
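A minimal usage sketch for the new tags, assuming the usual deal.II and
MPI headers are available; the helper notify_n_particles() is
hypothetical, only the tag name and the calling pattern come from the
hunks below:

    #include <deal.II/base/mpi_tags.h>
    #include <mpi.h>

    // Hypothetical helper: tell one neighbor rank how many particles we
    // will send to it, using the tag reserved for this step instead of a
    // magic number such as 0 or 9001. A unique tag ensures the matching
    // receive cannot accidentally pick up a message sent by a different
    // algorithm on the same communicator.
    void notify_n_particles(unsigned int   n_particles,
                            const int      neighbor_rank,
                            const MPI_Comm comm)
    {
      const int mpi_tag = dealii::Utilities::MPI::internal::Tags::
        particle_handler_send_recv_particles_setup;

      const int ierr = MPI_Send(&n_particles,
                                1,
                                MPI_UNSIGNED,
                                neighbor_rank,
                                mpi_tag,
                                comm);
      (void)ierr; // a real caller would check this, e.g. with AssertThrowMPI
    }
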
 include/deal.II/base/mpi_tags.h               | 51 ++++++++++++++++---
 source/base/process_grid.cc                   |  5 +-
 .../fully_distributed_tria_util.cc            | 24 +++++++--
 source/distributed/tria_base.cc               |  7 ++-
 source/grid/grid_tools.cc                     | 15 ++++--
 source/lac/scalapack.cc                       | 14 +++--
 source/particles/particle_handler.cc          | 14 +++--
 7 files changed, 102 insertions(+), 28 deletions(-)

diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h
index 728d75e0ac..cc5bc2b057 100644
--- a/include/deal.II/base/mpi_tags.h
+++ b/include/deal.II/base/mpi_tags.h
@@ -28,14 +28,22 @@ namespace Utilities
     namespace internal
     {
       /**
-       * This enum holds all MPI tags used in collective MPI communications with
-       * using MPI_ANY_SOURCE inside the deal.II library.
+       * This enum holds all MPI tags used in point-to-point MPI
+       * communication inside the deal.II library.
        *
        * We keep these tags in a central location so that they are unique within
-       * the library. Otherwise, communication that receives packages with
-       * MPI_ANY_SOURCE might pick up packets from a different algorithm.
+       * the library. Otherwise, a receive operation might pick up messages
+       * intended for a different algorithm. This is especially true if
+       * MPI_ANY_SOURCE is used.
+       *
+       * The MPI functions and wrappers that take an MPI tag are:
+       * - MPI_Send, MPI_Recv, MPI_Sendrecv
+       * - MPI_Isend, MPI_Irecv
+       * - MPI_Probe, MPI_Iprobe
+       * - MPI_Comm_create_group, MPI_Intercomm_create,
+       *   Utilities::MPI::create_group
        */
-      struct Tags
+      namespace Tags
       {
         /**
          * The enum with the tags.
@@ -87,10 +95,37 @@ namespace Utilities
           /// ConsensusAlgorithm_PEX::process
           consensus_algorithm_pex_process_deliver,
 
+          /// ConstructionData<dim,
+          /// spacedim>::create_construction_data_from_triangulation_in_groups()
+          fully_distributed_create,
+
+          /// TriangulationBase<dim, spacedim>::fill_level_ghost_owners()
+          triangulation_base_fill_level_ghost_owners,
+
+          /// GridTools::compute_local_to_global_vertex_index_map
+          grid_tools_compute_local_to_global_vertex_index_map,
+          /// GridTools::compute_local_to_global_vertex_index_map second tag
+          grid_tools_compute_local_to_global_vertex_index_map2,
+
+          /// ParticleHandler<dim, spacedim>::send_recv_particles
+          particle_handler_send_recv_particles_setup,
+          /// ParticleHandler<dim, spacedim>::send_recv_particles
+          particle_handler_send_recv_particles_send,
+
+          /// ScaLAPACKMatrix<NumberType>::copy_to
+          scalapack_copy_to,
+          /// ScaLAPACKMatrix<NumberType>::copy_to second tag
+          scalapack_copy_to2,
+          /// ScaLAPACKMatrix<NumberType>::copy_from
+          scalapack_copy_from,
+
+          /// ProcessGrid::ProcessGrid
+          process_grid_constructor,
+
         };
-      };
-    } // namespace internal
-  }   // namespace MPI
+      } // namespace Tags
+    }   // namespace internal
+  }     // namespace MPI
 } // namespace Utilities
 
 
diff --git a/source/base/process_grid.cc b/source/base/process_grid.cc
index b9cc3149d5..609b8e2741 100644
--- a/source/base/process_grid.cc
+++ b/source/base/process_grid.cc
@@ -181,9 +181,12 @@ namespace Utilities
       // Note that on all the active MPI processes (except for the one with
       // rank 0) the resulting MPI_Comm mpi_communicator_inactive_with_root
       // will be MPI_COMM_NULL.
+      const int mpi_tag =
+        Utilities::MPI::internal::Tags::process_grid_constructor;
+
       ierr = Utilities::MPI::create_group(mpi_communicator,
                                           inactive_with_root_group,
-                                          55,
+                                          mpi_tag,
                                           &mpi_communicator_inactive_with_root);
       AssertThrowMPI(ierr);
 
diff --git a/source/distributed/fully_distributed_tria_util.cc b/source/distributed/fully_distributed_tria_util.cc
index d7acbeaa7a..03ec3a0889 100644
--- a/source/distributed/fully_distributed_tria_util.cc
+++ b/source/distributed/fully_distributed_tria_util.cc
@@ -577,6 +577,9 @@ namespace parallel
           dealii::Utilities::MPI::this_mpi_process(comm);
         const unsigned int group_root = (my_rank / group_size) * group_size;
 
+        const int mpi_tag =
+          Utilities::MPI::internal::Tags::fully_distributed_create;
+
         // check if process is root of the group
         if (my_rank == group_root)
           {
@@ -613,8 +616,12 @@ namespace parallel
                 dealii::Utilities::pack(construction_data, buffer, false);
 
                 // 3c) send ConstructionData
-                const auto ierr = MPI_Send(
-                  buffer.data(), buffer.size(), MPI_CHAR, other_rank, 0, comm);
+                const auto ierr = MPI_Send(buffer.data(),
+                                           buffer.size(),
+                                           MPI_CHAR,
+                                           other_rank,
+                                           mpi_tag,
+                                           comm);
                 AssertThrowMPI(ierr);
               }
 
@@ -627,13 +634,20 @@ namespace parallel
             // 3a) recv packed ConstructionData from group-root process
             //     (counter-part of 3c of root process)
             MPI_Status status;
-            auto       ierr = MPI_Probe(group_root, 0, comm, &status);
+            auto       ierr = MPI_Probe(group_root, mpi_tag, comm, &status);
             AssertThrowMPI(ierr);
+
             int len;
             MPI_Get_count(&status, MPI_CHAR, &len);
+
             std::vector<char> buf(len);
-            ierr =
-              MPI_Recv(buf.data(), len, MPI_CHAR, group_root, 0, comm, &status);
+            ierr = MPI_Recv(buf.data(),
+                            len,
+                            MPI_CHAR,
+                            status.MPI_SOURCE,
+                            status.MPI_TAG,
+                            comm,
+                            &status);
             AssertThrowMPI(ierr);
 
             // 3b) unpack ConstructionData (counter-part of 3b of root process)
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index ab57bb0e6e..4ec55441fe 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -253,6 +253,9 @@ namespace parallel
       int ierr = MPI_Barrier(this->mpi_communicator);
       AssertThrowMPI(ierr);
 
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        triangulation_base_fill_level_ghost_owners;
+
       // important: preallocate to avoid (re)allocation:
       std::vector<MPI_Request> requests(
         this->number_cache.level_ghost_owners.size());
@@ -268,7 +271,7 @@ namespace parallel
                            1,
                            MPI_UNSIGNED,
                            *it,
-                           9001,
+                           mpi_tag,
                            this->mpi_communicator,
                            &requests[req_counter]);
           AssertThrowMPI(ierr);
@@ -284,7 +287,7 @@ namespace parallel
                           1,
                           MPI_UNSIGNED,
                           *it,
-                          9001,
+                          mpi_tag,
                           this->mpi_communicator,
                           MPI_STATUS_IGNORE);
           AssertThrowMPI(ierr);
diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc
index c0cb7035df..a328fcfe64 100644
--- a/source/grid/grid_tools.cc
+++ b/source/grid/grid_tools.cc
@@ -2498,6 +2498,13 @@ namespace GridTools
     for (; global_index_it != global_index_end; ++global_index_it)
       global_index_it->second += shift;
 
+
+    const int mpi_tag = Utilities::MPI::internal::Tags::
+      grid_tools_compute_local_to_global_vertex_index_map;
+    const int mpi_tag2 = Utilities::MPI::internal::Tags::
+      grid_tools_compute_local_to_global_vertex_index_map2;
+
+
     // In a first message, send the global ID of the vertices and the local
     // positions in the cells. In a second messages, send the cell ID as a
     // resize string. This is done in two messages so that types are not mixed
@@ -2535,7 +2542,7 @@ namespace GridTools
                          buffer_size,
                          DEAL_II_VERTEX_INDEX_MPI_TYPE,
                          destination,
-                         0,
+                         mpi_tag,
                          triangulation.get_communicator(),
                          &first_requests[i]);
         AssertThrowMPI(ierr);
@@ -2560,7 +2567,7 @@ namespace GridTools
                         buffer_size,
                         DEAL_II_VERTEX_INDEX_MPI_TYPE,
                         source,
-                        0,
+                        mpi_tag,
                         triangulation.get_communicator(),
                         MPI_STATUS_IGNORE);
         AssertThrowMPI(ierr);
@@ -2601,7 +2608,7 @@ namespace GridTools
                          buffer_size,
                          MPI_CHAR,
                          destination,
-                         0,
+                         mpi_tag2,
                          triangulation.get_communicator(),
                          &second_requests[i]);
         AssertThrowMPI(ierr);
@@ -2624,7 +2631,7 @@ namespace GridTools
                         buffer_size,
                         MPI_CHAR,
                         source,
-                        0,
+                        mpi_tag2,
                         triangulation.get_communicator(),
                         MPI_STATUS_IGNORE);
         AssertThrowMPI(ierr);
diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc
index 3334cf4525..4b729718a0 100644
--- a/source/lac/scalapack.cc
+++ b/source/lac/scalapack.cc
@@ -392,9 +392,11 @@ ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
   MPI_Group              group_B;
   MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
   MPI_Comm communicator_B;
+
+  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
   Utilities::MPI::create_group(this->grid->mpi_communicator,
                                group_B,
-                               0,
+                               mpi_tag,
                                &communicator_B);
   int n_proc_rows_B = 1, n_proc_cols_B = 1;
   int this_process_row_B = -1, this_process_column_B = -1;
@@ -560,9 +562,11 @@ ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
   MPI_Group              group_B;
   MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
   MPI_Comm communicator_B;
+
+  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
   Utilities::MPI::create_group(this->grid->mpi_communicator,
                                group_B,
-                               0,
+                               mpi_tag,
                                &communicator_B);
   int n_proc_rows_B = 1, n_proc_cols_B = 1;
   int this_process_row_B = -1, this_process_column_B = -1;
@@ -893,9 +897,11 @@ ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
       // first argument, even if the program we are currently running
       // and that is calling this function only works on a subset of
       // processes. the same holds for the wrapper/fallback we are using here.
-      ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
+
+      const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
+      ierr              = Utilities::MPI::create_group(MPI_COMM_WORLD,
                                           group_union,
-                                          5,
+                                          mpi_tag,
                                           &mpi_communicator_union);
       AssertThrowMPI(ierr);
 
diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc
index 95af0a1319..65aa37fbff 100644
--- a/source/particles/particle_handler.cc
+++ b/source/particles/particle_handler.cc
@@ -937,6 +937,9 @@ namespace Particles
 
     // Notify other processors how many particles we will send
     {
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        particle_handler_send_recv_particles_setup;
+
       std::vector<MPI_Request> n_requests(2 * n_neighbors);
       for (unsigned int i = 0; i < n_neighbors; ++i)
         {
@@ -944,7 +947,7 @@ namespace Particles
                                      1,
                                      MPI_UNSIGNED,
                                      neighbors[i],
-                                     0,
+                                     mpi_tag,
                                      triangulation->get_communicator(),
                                      &(n_requests[2 * i]));
           AssertThrowMPI(ierr);
@@ -955,7 +958,7 @@ namespace Particles
                                      1,
                                      MPI_UNSIGNED,
                                      neighbors[i],
-                                     0,
+                                     mpi_tag,
                                      triangulation->get_communicator(),
                                      &(n_requests[2 * i + 1]));
           AssertThrowMPI(ierr);
@@ -982,6 +985,9 @@ namespace Particles
       unsigned int             send_ops = 0;
       unsigned int             recv_ops = 0;
 
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        particle_handler_send_recv_particles_send;
+
       for (unsigned int i = 0; i < n_neighbors; ++i)
         if (n_recv_data[i] > 0)
           {
@@ -989,7 +995,7 @@ namespace Particles
                                        n_recv_data[i],
                                        MPI_CHAR,
                                        neighbors[i],
-                                       1,
+                                       mpi_tag,
                                        triangulation->get_communicator(),
                                        &(requests[send_ops]));
             AssertThrowMPI(ierr);
@@ -1003,7 +1009,7 @@ namespace Particles
                                        n_send_data[i],
                                        MPI_CHAR,
                                        neighbors[i],
-                                       1,
+                                       mpi_tag,
                                        triangulation->get_communicator(),
                                        &(requests[send_ops + recv_ops]));
             AssertThrowMPI(ierr);
-- 
2.39.5