https://gitweb.dealii.org/ - dealii.git/commitdiff
Add some more AssertThrowMPIs. 11979/head
author    David Wells <drwells@email.unc.edu>
          Mon, 29 Mar 2021 17:07:08 +0000 (13:07 -0400)
committer David Wells <drwells@email.unc.edu>
          Mon, 29 Mar 2021 17:07:31 +0000 (13:07 -0400)
source/base/mpi.cc
source/base/partitioner.cc
source/base/timer.cc
source/distributed/tria_base.cc
source/dofs/dof_handler_policy.cc
source/dofs/dof_renumbering.cc
source/matrix_free/vector_data_exchange.cc
source/particles/generators.cc

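The change applied throughout is the same in every file: the return code of each raw MPI call is stored in an ierr variable and passed to deal.II's AssertThrowMPI() macro, which throws an exception of type ExcMPI if the code is not MPI_SUCCESS. The following is a minimal sketch of that pattern; the helper function checked_sum is illustrative only and not part of this commit.

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>

    // Sum a value over all ranks, checking the MPI return code the same
    // way this commit does: capture it in `ierr`, then AssertThrowMPI().
    unsigned int
    checked_sum(const unsigned int local_value, const MPI_Comm comm)
    {
      unsigned int global_sum = 0;
      const int    ierr       = MPI_Allreduce(
        &local_value, &global_sum, 1, MPI_UNSIGNED, MPI_SUM, comm);
      AssertThrowMPI(ierr); // throws ExcMPI if ierr != MPI_SUCCESS
      return global_sum;
    }

The hunks below simply apply this check after every previously unchecked MPI call (MPI_Allgather, MPI_Reduce, MPI_Scatter, MPI_Exscan, MPI_Gather, MPI_Bcast, MPI_Isend, MPI_Irecv, MPI_Waitany, MPI_Waitall).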
index 619d5fa5bf477b56bd0a6a91ec3c08e9b71fa905..c47c1eb2449e8cbf8b6524d1715a56996dc793e5 100644 (file)
@@ -147,8 +147,9 @@ namespace Utilities
       const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
 
       std::vector<unsigned int> ranks(size);
-      MPI_Allgather(
+      const int                 ierr = MPI_Allgather(
         &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
+      AssertThrowMPI(ierr);
 
       return ranks;
     }
@@ -531,21 +532,23 @@ namespace Utilities
       std::vector<unsigned int> buffer(dest_vector.size());
       unsigned int              n_recv_from = 0;
 
-      MPI_Reduce(dest_vector.data(),
-                 buffer.data(),
-                 dest_vector.size(),
-                 MPI_UNSIGNED,
-                 MPI_SUM,
-                 0,
-                 mpi_comm);
-      MPI_Scatter(buffer.data(),
-                  1,
-                  MPI_UNSIGNED,
-                  &n_recv_from,
-                  1,
-                  MPI_UNSIGNED,
-                  0,
-                  mpi_comm);
+      int ierr = MPI_Reduce(dest_vector.data(),
+                            buffer.data(),
+                            dest_vector.size(),
+                            MPI_UNSIGNED,
+                            MPI_SUM,
+                            0,
+                            mpi_comm);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Scatter(buffer.data(),
+                         1,
+                         MPI_UNSIGNED,
+                         &n_recv_from,
+                         1,
+                         MPI_UNSIGNED,
+                         0,
+                         mpi_comm);
+      AssertThrowMPI(ierr);
 
       return n_recv_from;
 #  endif
index 33acfd77baae8bccb269b97e2951978d31d6de18..b6639aa92f8feb1b05503dad4d04addab0f361dc 100644 (file)
@@ -76,12 +76,14 @@ namespace Utilities
       types::global_dof_index prefix_sum = 0;
 
 #ifdef DEAL_II_WITH_MPI
-      MPI_Exscan(&local_size,
-                 &prefix_sum,
-                 1,
-                 Utilities::MPI::internal::mpi_type_id(&prefix_sum),
-                 MPI_SUM,
-                 communicator);
+      const int ierr =
+        MPI_Exscan(&local_size,
+                   &prefix_sum,
+                   1,
+                   Utilities::MPI::internal::mpi_type_id(&prefix_sum),
+                   MPI_SUM,
+                   communicator);
+      AssertThrowMPI(ierr);
 #endif
 
       local_range_data = {prefix_sum, prefix_sum + local_size};
index 2f7e84ce8677bebc444ce575e9473956ee3e0028..61f75de4f381136e1549b4144683b439ed274c56 100644 (file)
@@ -889,14 +889,15 @@ TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
         std::vector<double> receive_data(my_rank == 0 ? n_ranks : 0);
         std::vector<double> result(9);
 #ifdef DEAL_II_WITH_MPI
-        MPI_Gather(&given_time,
-                   1,
-                   MPI_DOUBLE,
-                   receive_data.data(),
-                   1,
-                   MPI_DOUBLE,
-                   0,
-                   mpi_comm);
+        int ierr = MPI_Gather(&given_time,
+                              1,
+                              MPI_DOUBLE,
+                              receive_data.data(),
+                              1,
+                              MPI_DOUBLE,
+                              0,
+                              mpi_comm);
+        AssertThrowMPI(ierr);
         if (my_rank == 0)
           {
             // fill the received data in a pair and sort; on the way, also
@@ -923,7 +924,8 @@ TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
             result[7] = data_rank[n_ranks - 1].first;
             result[8] = data_rank[n_ranks - 1].second;
           }
-        MPI_Bcast(result.data(), 9, MPI_DOUBLE, 0, mpi_comm);
+        ierr = MPI_Bcast(result.data(), 9, MPI_DOUBLE, 0, mpi_comm);
+        AssertThrowMPI(ierr);
 #endif
         out_stream << std::setw(10) << std::setprecision(4) << std::right;
         out_stream << result[0] << "s ";
index 203a1365f8c813a9889dc4b351098854cf3e6e62..fe6e2b1337395753e0ccb4ef2543541394f8b95d 100644 (file)
@@ -430,12 +430,14 @@ namespace parallel
     // 2) determine the offset of each process
     types::global_cell_index cell_index = 0;
 
-    MPI_Exscan(&n_locally_owned_cells,
-               &cell_index,
-               1,
-               Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
-               MPI_SUM,
-               this->mpi_communicator);
+    const int ierr =
+      MPI_Exscan(&n_locally_owned_cells,
+                 &cell_index,
+                 1,
+                 Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
+                 MPI_SUM,
+                 this->mpi_communicator);
+    AssertThrowMPI(ierr);
 
     // 3) give global indices to locally-owned cells and mark all other cells as
     //    invalid
@@ -498,13 +500,14 @@ namespace parallel
         std::vector<types::global_cell_index> cell_index(
           this->n_global_levels(), 0);
 
-        MPI_Exscan(n_locally_owned_cells.data(),
-                   cell_index.data(),
-                   this->n_global_levels(),
-                   Utilities::MPI::internal::mpi_type_id(
-                     n_locally_owned_cells.data()),
-                   MPI_SUM,
-                   this->mpi_communicator);
+        int ierr = MPI_Exscan(n_locally_owned_cells.data(),
+                              cell_index.data(),
+                              this->n_global_levels(),
+                              Utilities::MPI::internal::mpi_type_id(
+                                n_locally_owned_cells.data()),
+                              MPI_SUM,
+                              this->mpi_communicator);
+        AssertThrowMPI(ierr);
 
         // 3) determine global number of "active" cells on each level
         std::vector<types::global_cell_index> n_cells_level(
@@ -513,11 +516,13 @@ namespace parallel
         for (unsigned int l = 0; l < this->n_global_levels(); ++l)
           n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
 
-        MPI_Bcast(n_cells_level.data(),
-                  this->n_global_levels(),
-                  Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
-                  this->n_subdomains - 1,
-                  this->mpi_communicator);
+        ierr =
+          MPI_Bcast(n_cells_level.data(),
+                    this->n_global_levels(),
+                    Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
+                    this->n_subdomains - 1,
+                    this->mpi_communicator);
+        AssertThrowMPI(ierr);
 
         // 4) give global indices to locally-owned cells on level and mark
         //    all other cells as invalid
index e9f07561ccca071cfd467c5967bbd4e02395a356..5a5cdbd59355de546523b8b3408824f89426c8bf 100644 (file)
@@ -4054,6 +4054,7 @@ namespace internal
                                triangulation->get_communicator()) -
                                1,
                              triangulation->get_communicator());
+            AssertThrowMPI(ierr);
 
             // shift indices
             for (types::global_dof_index &index : renumbering)
index f572a09eee643429949ac37d2feff5e008d40d70..544bf909288aa251568c8a3a0eb8c74978970665 100644 (file)
@@ -1384,12 +1384,13 @@ namespace DoFRenumbering
 #ifdef DEAL_II_WITH_MPI
         types::global_dof_index locally_owned_size =
           dof_handler.locally_owned_dofs().n_elements();
-        MPI_Exscan(&locally_owned_size,
-                   &my_starting_index,
-                   1,
-                   DEAL_II_DOF_INDEX_MPI_TYPE,
-                   MPI_SUM,
-                   tria->get_communicator());
+        const int ierr = MPI_Exscan(&locally_owned_size,
+                                    &my_starting_index,
+                                    1,
+                                    DEAL_II_DOF_INDEX_MPI_TYPE,
+                                    MPI_SUM,
+                                    tria->get_communicator());
+        AssertThrowMPI(ierr);
 #endif
       }
 
index 2da6124f4e0cb41e6816ead919ad2a3d6239db40..7fe87252db9da95e6d47f4e8c949c05a582b7ebd 100644 (file)
@@ -576,26 +576,35 @@ namespace internal
                                             sm_import_ranks.size());
 
           for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-            MPI_Isend(sm_export_data_this_indices.data() +
-                        sm_export_data_this_ptr[i],
-                      sm_export_data_this_ptr[i + 1] -
-                        sm_export_data_this_ptr[i],
-                      MPI_UNSIGNED,
-                      sm_ghost_ranks[i],
-                      4,
-                      comm_sm,
-                      requests.data() + i);
+            {
+              const int ierr = MPI_Isend(sm_export_data_this_indices.data() +
+                                           sm_export_data_this_ptr[i],
+                                         sm_export_data_this_ptr[i + 1] -
+                                           sm_export_data_this_ptr[i],
+                                         MPI_UNSIGNED,
+                                         sm_ghost_ranks[i],
+                                         4,
+                                         comm_sm,
+                                         requests.data() + i);
+              AssertThrowMPI(ierr);
+            }
 
           for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-            MPI_Irecv(sm_import_data_indices.data() + sm_import_data_ptr[i],
-                      sm_import_data_ptr[i + 1] - sm_import_data_ptr[i],
-                      MPI_UNSIGNED,
-                      sm_import_ranks[i],
-                      4,
-                      comm_sm,
-                      requests.data() + sm_ghost_ranks.size() + i);
+            {
+              const int ierr =
+                MPI_Irecv(sm_import_data_indices.data() + sm_import_data_ptr[i],
+                          sm_import_data_ptr[i + 1] - sm_import_data_ptr[i],
+                          MPI_UNSIGNED,
+                          sm_import_ranks[i],
+                          4,
+                          comm_sm,
+                          requests.data() + sm_ghost_ranks.size() + i);
+              AssertThrowMPI(ierr);
+            }
 
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+          const int ierr =
+            MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
         }
 
         // send sm_import_data_this to sm-neighbor -> sm_export_data_indices
@@ -604,26 +613,35 @@ namespace internal
                                             sm_ghost_ranks.size());
 
           for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-            MPI_Isend(sm_import_data_this_indices.data() +
-                        sm_import_data_this_ptr[i],
-                      sm_import_data_this_ptr[i + 1] -
-                        sm_import_data_this_ptr[i],
-                      MPI_UNSIGNED,
-                      sm_import_ranks[i],
-                      2,
-                      comm_sm,
-                      requests.data() + i);
+            {
+              const int ierr = MPI_Isend(sm_import_data_this_indices.data() +
+                                           sm_import_data_this_ptr[i],
+                                         sm_import_data_this_ptr[i + 1] -
+                                           sm_import_data_this_ptr[i],
+                                         MPI_UNSIGNED,
+                                         sm_import_ranks[i],
+                                         2,
+                                         comm_sm,
+                                         requests.data() + i);
+              AssertThrowMPI(ierr);
+            }
 
           for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-            MPI_Irecv(sm_export_data_indices.data() + sm_export_data_ptr[i],
-                      sm_export_data_ptr[i + 1] - sm_export_data_ptr[i],
-                      MPI_UNSIGNED,
-                      sm_ghost_ranks[i],
-                      2,
-                      comm_sm,
-                      requests.data() + sm_import_ranks.size() + i);
+            {
+              const int ierr =
+                MPI_Irecv(sm_export_data_indices.data() + sm_export_data_ptr[i],
+                          sm_export_data_ptr[i + 1] - sm_export_data_ptr[i],
+                          MPI_UNSIGNED,
+                          sm_ghost_ranks[i],
+                          2,
+                          comm_sm,
+                          requests.data() + sm_import_ranks.size() + i);
+              AssertThrowMPI(ierr);
+            }
 
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+          const int ierr =
+            MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
         }
 
         // store data structures and, if needed, compress them
@@ -843,23 +861,30 @@ namespace internal
         int dummy;
         // receive a signal that relevant sm neighbors are ready
         for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-          MPI_Irecv(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_ghost_ranks[i],
-                    communication_channel + 0,
-                    comm_sm,
-                    requests.data() + sm_import_ranks.size() + i);
+          {
+            const int ierr =
+              MPI_Irecv(&dummy,
+                        0,
+                        MPI_INT,
+                        sm_ghost_ranks[i],
+                        communication_channel + 0,
+                        comm_sm,
+                        requests.data() + sm_import_ranks.size() + i);
+            AssertThrowMPI(ierr);
+          }
 
         // signal to all relevant sm neighbors that this process is ready
         for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-          MPI_Isend(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_import_ranks[i],
-                    communication_channel + 0,
-                    comm_sm,
-                    requests.data() + i);
+          {
+            const int ierr = MPI_Isend(&dummy,
+                                       0,
+                                       MPI_INT,
+                                       sm_import_ranks[i],
+                                       communication_channel + 0,
+                                       comm_sm,
+                                       requests.data() + i);
+            AssertThrowMPI(ierr);
+          }
 
         // receive data from remote processes
         for (unsigned int i = 0; i < ghost_targets_data.size(); i++)
@@ -868,14 +893,16 @@ namespace internal
               n_ghost_indices_in_larger_set_by_remote_rank[i] -
               ghost_targets_data[i][2];
 
-            MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
-                      ghost_targets_data[i][2],
-                      Utilities::MPI::internal::mpi_type_id(buffer.data()),
-                      ghost_targets_data[i][0],
-                      communication_channel + 1,
-                      comm,
-                      requests.data() + sm_import_ranks.size() +
-                        sm_ghost_ranks.size() + i);
+            const int ierr =
+              MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
+                        ghost_targets_data[i][2],
+                        Utilities::MPI::internal::mpi_type_id(buffer.data()),
+                        ghost_targets_data[i][0],
+                        communication_channel + 1,
+                        comm,
+                        requests.data() + sm_import_ranks.size() +
+                          sm_ghost_ranks.size() + i);
+            AssertThrowMPI(ierr);
           }
 
         // send data to remote processes
@@ -890,14 +917,17 @@ namespace internal
                   data_this[import_indices_data.second[j].first + l];
 
             // send data away
-            MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
-                      import_targets_data[i][2],
-                      Utilities::MPI::internal::mpi_type_id(data_this.data()),
-                      import_targets_data[i][0],
-                      communication_channel + 1,
-                      comm,
-                      requests.data() + sm_import_ranks.size() +
-                        sm_ghost_ranks.size() + ghost_targets_data.size() + i);
+            const int ierr =
+              MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
+                        import_targets_data[i][2],
+                        Utilities::MPI::internal::mpi_type_id(data_this.data()),
+                        import_targets_data[i][0],
+                        communication_channel + 1,
+                        comm,
+                        requests.data() + sm_import_ranks.size() +
+                          sm_ghost_ranks.size() + ghost_targets_data.size() +
+                          i);
+            AssertThrowMPI(ierr);
           }
 #endif
       }
@@ -942,11 +972,13 @@ namespace internal
              c < sm_ghost_ranks.size() + ghost_targets_data.size();
              c++)
           {
-            int i;
-            MPI_Waitany(sm_ghost_ranks.size() + ghost_targets_data.size(),
-                        requests.data() + sm_import_ranks.size(),
-                        &i,
-                        MPI_STATUS_IGNORE);
+            int       i;
+            const int ierr =
+              MPI_Waitany(sm_ghost_ranks.size() + ghost_targets_data.size(),
+                          requests.data() + sm_import_ranks.size(),
+                          &i,
+                          MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
 
             const auto s = split(i);
             i            = s.second;
@@ -1032,7 +1064,10 @@ namespace internal
               }
           }
 
-        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+
 #endif
       }
 
@@ -1073,22 +1108,29 @@ namespace internal
 
         int dummy;
         for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-          MPI_Isend(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_ghost_ranks[i],
-                    communication_channel + 1,
-                    comm_sm,
-                    requests.data() + i);
+          {
+            const int ierr = MPI_Isend(&dummy,
+                                       0,
+                                       MPI_INT,
+                                       sm_ghost_ranks[i],
+                                       communication_channel + 1,
+                                       comm_sm,
+                                       requests.data() + i);
+            AssertThrowMPI(ierr);
+          }
 
         for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-          MPI_Irecv(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_import_ranks[i],
-                    communication_channel + 1,
-                    comm_sm,
-                    requests.data() + sm_ghost_ranks.size() + i);
+          {
+            const int ierr =
+              MPI_Irecv(&dummy,
+                        0,
+                        MPI_INT,
+                        sm_import_ranks[i],
+                        communication_channel + 1,
+                        comm_sm,
+                        requests.data() + sm_ghost_ranks.size() + i);
+            AssertThrowMPI(ierr);
+          }
 
         for (unsigned int i = 0; i < ghost_targets_data.size(); i++)
           {
@@ -1128,26 +1170,31 @@ namespace internal
                   }
               }
 
-            MPI_Isend(buffer.data() + ghost_targets_data[i][1],
-                      ghost_targets_data[i][2],
-                      Utilities::MPI::internal::mpi_type_id(buffer.data()),
-                      ghost_targets_data[i][0],
-                      communication_channel + 0,
-                      comm,
-                      requests.data() + sm_ghost_ranks.size() +
-                        sm_import_ranks.size() + i);
+            const int ierr =
+              MPI_Isend(buffer.data() + ghost_targets_data[i][1],
+                        ghost_targets_data[i][2],
+                        Utilities::MPI::internal::mpi_type_id(buffer.data()),
+                        ghost_targets_data[i][0],
+                        communication_channel + 0,
+                        comm,
+                        requests.data() + sm_ghost_ranks.size() +
+                          sm_import_ranks.size() + i);
+            AssertThrowMPI(ierr);
           }
 
         for (unsigned int i = 0; i < import_targets_data.size(); i++)
-          MPI_Irecv(temporary_storage.data() + import_targets_data[i][1],
-                    import_targets_data[i][2],
-                    Utilities::MPI::internal::mpi_type_id(
-                      temporary_storage.data()),
-                    import_targets_data[i][0],
-                    communication_channel + 0,
-                    comm,
-                    requests.data() + sm_ghost_ranks.size() +
-                      sm_import_ranks.size() + ghost_targets_data.size() + i);
+          {
+            const int ierr = MPI_Irecv(
+              temporary_storage.data() + import_targets_data[i][1],
+              import_targets_data[i][2],
+              Utilities::MPI::internal::mpi_type_id(temporary_storage.data()),
+              import_targets_data[i][0],
+              communication_channel + 0,
+              comm,
+              requests.data() + sm_ghost_ranks.size() + sm_import_ranks.size() +
+                ghost_targets_data.size() + i);
+            AssertThrowMPI(ierr);
+          }
 #endif
       }
 
@@ -1202,12 +1249,14 @@ namespace internal
                    ghost_targets_data.size();
              c++)
           {
-            int i;
-            MPI_Waitany(sm_import_ranks.size() + import_targets_data.size() +
-                          ghost_targets_data.size(),
-                        requests.data() + sm_ghost_ranks.size(),
-                        &i,
-                        MPI_STATUS_IGNORE);
+            int       i;
+            const int ierr =
+              MPI_Waitany(sm_import_ranks.size() + import_targets_data.size() +
+                            ghost_targets_data.size(),
+                          requests.data() + sm_ghost_ranks.size(),
+                          &i,
+                          MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
 
             const auto &s = split(i);
             i             = s.second;
@@ -1268,7 +1317,9 @@ namespace internal
               }
           }
 
-        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
 #endif
       }
 
index d12445fa9594b312b148cb3d4402772f5a4a81f3..60c78be8c075c5590c8b733d014933bf748d6d9a 100644 (file)
@@ -129,12 +129,13 @@ namespace Particles
 
           // The local particle start index is the number of all particles
           // generated on lower MPI ranks.
-          MPI_Exscan(&n_particles_to_generate,
-                     &particle_index,
-                     1,
-                     DEAL_II_PARTICLE_INDEX_MPI_TYPE,
-                     MPI_SUM,
-                     tria->get_communicator());
+          const int ierr = MPI_Exscan(&n_particles_to_generate,
+                                      &particle_index,
+                                      1,
+                                      DEAL_II_PARTICLE_INDEX_MPI_TYPE,
+                                      MPI_SUM,
+                                      tria->get_communicator());
+          AssertThrowMPI(ierr);
         }
 #endif
 
@@ -295,12 +296,13 @@ namespace Particles
               dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
                 &triangulation))
           {
-            MPI_Exscan(&local_weight_integral,
-                       &local_start_weight,
-                       1,
-                       MPI_DOUBLE,
-                       MPI_SUM,
-                       tria->get_communicator());
+            const int ierr = MPI_Exscan(&local_weight_integral,
+                                        &local_start_weight,
+                                        1,
+                                        MPI_DOUBLE,
+                                        MPI_SUM,
+                                        tria->get_communicator());
+            AssertThrowMPI(ierr);
           }
 #endif
 
