https://gitweb.dealii.org/ - dealii.git/commitdiff
add many other MPI tags
author    Timo Heister <timo.heister@gmail.com>
          Wed, 6 Nov 2019 19:58:18 +0000 (14:58 -0500)
committer Timo Heister <timo.heister@gmail.com>
          Wed, 6 Nov 2019 19:58:18 +0000 (14:58 -0500)
include/deal.II/base/mpi_tags.h
source/base/process_grid.cc
source/distributed/fully_distributed_tria_util.cc
source/distributed/tria_base.cc
source/grid/grid_tools.cc
source/lac/scalapack.cc
source/particles/particle_handler.cc

index 728d75e0ac482c32fc7db51d4b5a7c2de0761237..cc5bc2b05735126087f8f0473e5c8f0a728a79a7 100644 (file)
@@ -28,14 +28,22 @@ namespace Utilities
     namespace internal
     {
       /**
-       * This enum holds all MPI tags used in collective MPI communications with
-       * using MPI_ANY_SOURCE inside the deal.II library.
+       * This enum holds all MPI tags used in point-to-point MPI communications
+       * inside the deal.II library.
        *
        * We keep these tags in a central location so that they are unique within
-       * the library. Otherwise, communication that receives packages with
-       * MPI_ANY_SOURCE might pick up packets from a different algorithm.
+       * the library. Otherwise, a receive operation might pick up packets
+       * from a different algorithm. This is especially true if
+       * MPI_ANY_SOURCE is used.
+       *
+       * The list of MPI functions that use an MPI tag is:
+       * - MPI_Send, MPI_Recv, MPI_Sendrecv
+       * - MPI_Isend, MPI_Irecv
+       * - MPI_Probe, MPI_Iprobe
+       * - MPI_Comm_create_group, MPI_Intercomm_create,
+       * Utilities::MPI::create_group
        */
-      struct Tags
+      namespace Tags
       {
         /**
          * The enum with the tags.
@@ -87,10 +95,37 @@ namespace Utilities
           /// ConsensusAlgorithm_PEX::process
           consensus_algorithm_pex_process_deliver,
 
+          /// ConstructionData<dim,
+          /// spacedim>::create_construction_data_from_triangulation_in_groups()
+          fully_distributed_create,
+
+          /// TriangulationBase<dim, spacedim>::fill_level_ghost_owners()
+          triangulation_base_fill_level_ghost_owners,
+
+          /// GridTools::compute_local_to_global_vertex_index_map
+          grid_tools_compute_local_to_global_vertex_index_map,
+          /// GridTools::compute_local_to_global_vertex_index_map second tag
+          grid_tools_compute_local_to_global_vertex_index_map2,
+
+          /// ParticleHandler<dim, spacedim>::send_recv_particles
+          particle_handler_send_recv_particles_setup,
+          /// ParticleHandler<dim, spacedim>::send_recv_particles
+          particle_handler_send_recv_particles_send,
+
+          /// ScaLAPACKMatrix<NumberType>::copy_to
+          scalapack_copy_to,
+          /// ScaLAPACKMatrix<NumberType>::copy_to
+          scalapack_copy_to2,
+          /// ScaLAPACKMatrix<NumberType>::copy_from
+          scalapack_copy_from,
+
+          /// ProcessGrid::ProcessGrid
+          process_grid_constructor,
+
         };
-      };
-    } // namespace internal
-  }   // namespace MPI
+      } // namespace Tags
+    }   // namespace internal
+  }     // namespace MPI
 } // namespace Utilities
 
 
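The documentation above describes the central idea: every point-to-point communication in the library takes its tag from this enum rather than hard-coding an integer. Below is a minimal sketch of such a call site, assuming the usual deal.II headers; the tag name my_algorithm_exchange is hypothetical and stands in for whichever entry a new algorithm would add to the enum.

#include <deal.II/base/exceptions.h>
#include <deal.II/base/mpi_tags.h>

#include <mpi.h>

using namespace dealii;

// Hypothetical call site: the tag is taken from the central enum instead of a
// magic number such as 0 or 9001, so it cannot collide with the tag used by
// any other algorithm in the library. 'my_algorithm_exchange' is not a real
// entry of the enum; it stands in for whichever entry a new algorithm adds.
void
exchange_with_partner(const MPI_Comm comm,
                      const int      partner,
                      unsigned int & value)
{
  const int mpi_tag = Utilities::MPI::internal::Tags::my_algorithm_exchange;

  unsigned int received = 0;
  const int    ierr     = MPI_Sendrecv(&value, // send buffer
                                       1,
                                       MPI_UNSIGNED,
                                       partner,
                                       mpi_tag, // send tag
                                       &received, // receive buffer
                                       1,
                                       MPI_UNSIGNED,
                                       partner,
                                       mpi_tag, // receive tag
                                       comm,
                                       MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  value = received;
}
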
index b9cc3149d5279209aa055caa745cb75ae54411ba..609b8e2741ee5b2cc0721c0f2a9e34a63c52aed9 100644 (file)
@@ -181,9 +181,12 @@ namespace Utilities
       // Note that on all the active MPI processes (except for the one with
       // rank 0) the resulting MPI_Comm mpi_communicator_inactive_with_root
       // will be MPI_COMM_NULL.
+      const int mpi_tag =
+        Utilities::MPI::internal::Tags::process_grid_constructor;
+
       ierr = Utilities::MPI::create_group(mpi_communicator,
                                           inactive_with_root_group,
-                                          55,
+                                          mpi_tag,
                                           &mpi_communicator_inactive_with_root);
       AssertThrowMPI(ierr);
 
index d7acbeaa7a38d77c11059a90554edf0c69104687..03ec3a0889d1a8e0edd711e17715b609f79a531a 100644 (file)
@@ -577,6 +577,9 @@ namespace parallel
           dealii::Utilities::MPI::this_mpi_process(comm);
         const unsigned int group_root = (my_rank / group_size) * group_size;
 
+        const int mpi_tag =
+          Utilities::MPI::internal::Tags::fully_distributed_create;
+
         // check if process is root of the group
         if (my_rank == group_root)
           {
@@ -613,8 +616,12 @@ namespace parallel
                 dealii::Utilities::pack(construction_data, buffer, false);
 
                 // 3c) send ConstructionData
-                const auto ierr = MPI_Send(
-                  buffer.data(), buffer.size(), MPI_CHAR, other_rank, 0, comm);
+                const auto ierr = MPI_Send(buffer.data(),
+                                           buffer.size(),
+                                           MPI_CHAR,
+                                           other_rank,
+                                           mpi_tag,
+                                           comm);
                 AssertThrowMPI(ierr);
               }
 
@@ -627,13 +634,20 @@ namespace parallel
             // 3a) recv packed ConstructionData from group-root process
             //     (counter-part of 3c of root process)
             MPI_Status status;
-            auto       ierr = MPI_Probe(group_root, 0, comm, &status);
+            auto       ierr = MPI_Probe(group_root, mpi_tag, comm, &status);
             AssertThrowMPI(ierr);
+
             int len;
             MPI_Get_count(&status, MPI_CHAR, &len);
+
             std::vector<char> buf(len);
-            ierr =
-              MPI_Recv(buf.data(), len, MPI_CHAR, group_root, 0, comm, &status);
+            ierr = MPI_Recv(buf.data(),
+                            len,
+                            MPI_CHAR,
+                            status.MPI_SOURCE,
+                            status.MPI_TAG,
+                            comm,
+                            &status);
             AssertThrowMPI(ierr);
 
             // 3b) unpack ConstructionData (counter-part of 3b of root process)
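
The receiving side above uses the usual probe-then-receive idiom for messages of unknown size: probe with the expected tag, query the byte count, and receive into a buffer of exactly that size. A self-contained sketch of the same idiom; the function name and parameters are illustrative, not deal.II API:

#include <deal.II/base/exceptions.h>

#include <mpi.h>

#include <vector>

using namespace dealii;

// Receive a packed buffer of unknown length from 'source' with tag 'mpi_tag'.
std::vector<char>
receive_packed_buffer(const MPI_Comm comm, const int source, const int mpi_tag)
{
  // Block until a matching message is available and inspect its size.
  MPI_Status status;
  int        ierr = MPI_Probe(source, mpi_tag, comm, &status);
  AssertThrowMPI(ierr);

  int len = 0;
  ierr    = MPI_Get_count(&status, MPI_CHAR, &len);
  AssertThrowMPI(ierr);

  // Receive exactly the message that was probed, matching its source and tag.
  std::vector<char> buffer(len);
  ierr = MPI_Recv(buffer.data(),
                  len,
                  MPI_CHAR,
                  status.MPI_SOURCE,
                  status.MPI_TAG,
                  comm,
                  MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  return buffer;
}
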
index ab57bb0e6e8c34b9541c9c46158ce3aab4d1bf8e..4ec55441fefaa030fcaccf5c1faaaeb2608a8772 100644 (file)
@@ -253,6 +253,9 @@ namespace parallel
       int ierr = MPI_Barrier(this->mpi_communicator);
       AssertThrowMPI(ierr);
 
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        triangulation_base_fill_level_ghost_owners;
+
       // important: preallocate to avoid (re)allocation:
       std::vector<MPI_Request> requests(
         this->number_cache.level_ghost_owners.size());
@@ -268,7 +271,7 @@ namespace parallel
                            1,
                            MPI_UNSIGNED,
                            *it,
-                           9001,
+                           mpi_tag,
                            this->mpi_communicator,
                            &requests[req_counter]);
           AssertThrowMPI(ierr);
@@ -284,7 +287,7 @@ namespace parallel
                           1,
                           MPI_UNSIGNED,
                           *it,
-                          9001,
+                          mpi_tag,
                           this->mpi_communicator,
                           MPI_STATUS_IGNORE);
           AssertThrowMPI(ierr);
index c0cb7035df2e5937404fbf3e6615f7f2354287d3..a328fcfe6460028a197bba820e27dc2bf69150e3 100644 (file)
@@ -2498,6 +2498,13 @@ namespace GridTools
     for (; global_index_it != global_index_end; ++global_index_it)
       global_index_it->second += shift;
 
+
+    const int mpi_tag = Utilities::MPI::internal::Tags::
+      grid_tools_compute_local_to_global_vertex_index_map;
+    const int mpi_tag2 = Utilities::MPI::internal::Tags::
+      grid_tools_compute_local_to_global_vertex_index_map2;
+
+
     // In a first message, send the global ID of the vertices and the local
     // positions in the cells. In a second messages, send the cell ID as a
     // resize string. This is done in two messages so that types are not mixed
@@ -2535,7 +2542,7 @@ namespace GridTools
                          buffer_size,
                          DEAL_II_VERTEX_INDEX_MPI_TYPE,
                          destination,
-                         0,
+                         mpi_tag,
                          triangulation.get_communicator(),
                          &first_requests[i]);
         AssertThrowMPI(ierr);
@@ -2560,7 +2567,7 @@ namespace GridTools
                         buffer_size,
                         DEAL_II_VERTEX_INDEX_MPI_TYPE,
                         source,
-                        0,
+                        mpi_tag,
                         triangulation.get_communicator(),
                         MPI_STATUS_IGNORE);
         AssertThrowMPI(ierr);
@@ -2601,7 +2608,7 @@ namespace GridTools
                          buffer_size,
                          MPI_CHAR,
                          destination,
-                         0,
+                         mpi_tag2,
                          triangulation.get_communicator(),
                          &second_requests[i]);
         AssertThrowMPI(ierr);
@@ -2624,7 +2631,7 @@ namespace GridTools
                         buffer_size,
                         MPI_CHAR,
                         source,
-                        0,
+                        mpi_tag2,
                         triangulation.get_communicator(),
                         MPI_STATUS_IGNORE);
         AssertThrowMPI(ierr);
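
The comment in this file explains why two messages, and therefore two tags, are used: vertex indices and cell IDs have different MPI datatypes, and giving each message its own tag keeps the matching between the same pair of processes unambiguous. A reduced sketch of that pattern, with MPI_UNSIGNED_LONG_LONG standing in for DEAL_II_VERTEX_INDEX_MPI_TYPE and all names illustrative:

#include <deal.II/base/exceptions.h>

#include <mpi.h>

#include <vector>

using namespace dealii;

// Two logically different messages between the same pair of processes get two
// different tags, so the receiver cannot match one against the other even if
// they arrive out of order.
void
send_vertex_indices_and_cell_ids(
  const MPI_Comm                         comm,
  const int                              destination,
  const int                              tag_indices,
  const int                              tag_cell_ids,
  const std::vector<unsigned long long> &vertex_indices,
  const std::vector<char> &              cell_ids,
  MPI_Request &                          first_request,
  MPI_Request &                          second_request)
{
  int ierr = MPI_Isend(vertex_indices.data(),
                       static_cast<int>(vertex_indices.size()),
                       MPI_UNSIGNED_LONG_LONG,
                       destination,
                       tag_indices,
                       comm,
                       &first_request);
  AssertThrowMPI(ierr);

  ierr = MPI_Isend(cell_ids.data(),
                   static_cast<int>(cell_ids.size()),
                   MPI_CHAR,
                   destination,
                   tag_cell_ids,
                   comm,
                   &second_request);
  AssertThrowMPI(ierr);
}
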
index 3334cf4525465afb0c2414912960cde237ab565e..4b729718a011c87c9bbf5db3a2f1543cf4fc78a9 100644 (file)
@@ -392,9 +392,11 @@ ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
   MPI_Group              group_B;
   MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
   MPI_Comm communicator_B;
+
+  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
   Utilities::MPI::create_group(this->grid->mpi_communicator,
                                group_B,
-                               0,
+                               mpi_tag,
                                &communicator_B);
   int n_proc_rows_B = 1, n_proc_cols_B = 1;
   int this_process_row_B = -1, this_process_column_B = -1;
@@ -560,9 +562,11 @@ ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
   MPI_Group              group_B;
   MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
   MPI_Comm communicator_B;
+
+  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
   Utilities::MPI::create_group(this->grid->mpi_communicator,
                                group_B,
-                               0,
+                               mpi_tag,
                                &communicator_B);
   int n_proc_rows_B = 1, n_proc_cols_B = 1;
   int this_process_row_B = -1, this_process_column_B = -1;
@@ -893,9 +897,11 @@ ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
       // first argument, even if the program we are currently running
       // and that is calling this function only works on a subset of
       // processes. the same holds for the wrapper/fallback we are using here.
-      ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
+
+      const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
+      ierr              = Utilities::MPI::create_group(MPI_COMM_WORLD,
                                           group_union,
-                                          5,
+                                          mpi_tag,
                                           &mpi_communicator_union);
       AssertThrowMPI(ierr);
 
index 95af0a13192a893c4e8f269b3e3df592cc391171..65aa37fbff42e93dd97acbeb79ce8d6118868d96 100644 (file)
@@ -937,6 +937,9 @@ namespace Particles
 
     // Notify other processors how many particles we will send
     {
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        particle_handler_send_recv_particles_setup;
+
       std::vector<MPI_Request> n_requests(2 * n_neighbors);
       for (unsigned int i = 0; i < n_neighbors; ++i)
         {
@@ -944,7 +947,7 @@ namespace Particles
                                      1,
                                      MPI_UNSIGNED,
                                      neighbors[i],
-                                     0,
+                                     mpi_tag,
                                      triangulation->get_communicator(),
                                      &(n_requests[2 * i]));
           AssertThrowMPI(ierr);
@@ -955,7 +958,7 @@ namespace Particles
                                      1,
                                      MPI_UNSIGNED,
                                      neighbors[i],
-                                     0,
+                                     mpi_tag,
                                      triangulation->get_communicator(),
                                      &(n_requests[2 * i + 1]));
           AssertThrowMPI(ierr);
@@ -982,6 +985,9 @@ namespace Particles
       unsigned int             send_ops = 0;
       unsigned int             recv_ops = 0;
 
+      const int mpi_tag = Utilities::MPI::internal::Tags::
+        particle_handler_send_recv_particles_send;
+
       for (unsigned int i = 0; i < n_neighbors; ++i)
         if (n_recv_data[i] > 0)
           {
@@ -989,7 +995,7 @@ namespace Particles
                                        n_recv_data[i],
                                        MPI_CHAR,
                                        neighbors[i],
-                                       1,
+                                       mpi_tag,
                                        triangulation->get_communicator(),
                                        &(requests[send_ops]));
             AssertThrowMPI(ierr);
@@ -1003,7 +1009,7 @@ namespace Particles
                                        n_send_data[i],
                                        MPI_CHAR,
                                        neighbors[i],
-                                       1,
+                                       mpi_tag,
                                        triangulation->get_communicator(),
                                        &(requests[send_ops + recv_ops]));
             AssertThrowMPI(ierr);

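The two hunks above correspond to the two phases of send_recv_particles: with the setup tag each process first tells its neighbors how many bytes of packed particle data to expect, and only afterwards is the payload exchanged with the separate send tag, so the small size messages can never be matched against the data messages. A sketch of the first phase with illustrative names and the tag passed in as a parameter (this is not the deal.II API, only the pattern):

#include <deal.II/base/exceptions.h>

#include <mpi.h>

#include <vector>

using namespace dealii;

// Exchange with every neighbor the number of bytes that will be sent to it,
// using 'tag_setup'. The actual data exchange would follow with a second,
// different tag.
void
announce_data_sizes(const MPI_Comm                        comm,
                    const std::vector<int> &              neighbors,
                    const std::vector<std::vector<char>> &send_data,
                    const int                             tag_setup,
                    std::vector<unsigned int> &           n_recv_data)
{
  const unsigned int n_neighbors = neighbors.size();

  std::vector<unsigned int> n_send_data(n_neighbors);
  n_recv_data.resize(n_neighbors);

  // Preallocate all requests so they are not reallocated while in flight.
  std::vector<MPI_Request> n_requests(2 * n_neighbors);

  for (unsigned int i = 0; i < n_neighbors; ++i)
    {
      n_send_data[i] = static_cast<unsigned int>(send_data[i].size());

      int ierr = MPI_Isend(&n_send_data[i],
                           1,
                           MPI_UNSIGNED,
                           neighbors[i],
                           tag_setup,
                           comm,
                           &n_requests[2 * i]);
      AssertThrowMPI(ierr);

      ierr = MPI_Irecv(&n_recv_data[i],
                       1,
                       MPI_UNSIGNED,
                       neighbors[i],
                       tag_setup,
                       comm,
                       &n_requests[2 * i + 1]);
      AssertThrowMPI(ierr);
    }

  const int ierr =
    MPI_Waitall(2 * n_neighbors, n_requests.data(), MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);
}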