https://gitweb.dealii.org/ - dealii.git/commitdiff
Add some more MPI checks. (6491/head)
author     David Wells <wellsd2@rpi.edu>
           Sun, 6 May 2018 03:46:19 +0000 (23:46 -0400)
committer  David Wells <wellsd2@rpi.edu>
           Sun, 6 May 2018 03:46:19 +0000 (23:46 -0400)
source/base/process_grid.cc
source/grid/grid_tools.cc
source/lac/scalapack.cc
source/particles/particle_handler.cc
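
All four files receive the same treatment: MPI calls whose return codes were previously discarded now store that code in an ierr variable and pass it to deal.II's AssertThrowMPI macro, which turns a failing MPI call into an exception instead of letting the error go unnoticed. As a minimal sketch of the idiom (assuming an MPI-enabled build of deal.II; checked_barrier is a hypothetical helper used only for illustration):

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>

    // Illustration only: a barrier whose return code is checked
    // instead of being silently dropped.
    void checked_barrier(const MPI_Comm comm)
    {
      const int ierr = MPI_Barrier(comm);
      AssertThrowMPI(ierr); // throws if ierr is not MPI_SUCCESS
    }

The same two-step idiom (store the return value, then call AssertThrowMPI) is applied to every call touched below.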

source/base/process_grid.cc
index 362749dd326e65b79c2bdc6bb92c5d04c3b9f064..9f0465553998b8bcfb6326a047d4bae5fcf7dc18 100644 (file)
@@ -175,8 +175,10 @@ namespace Utilities
                              &mpi_communicator_inactive_with_root);
       AssertThrowMPI(ierr);
 
-      MPI_Group_free(&all_group);
-      MPI_Group_free(&inactive_with_root_group);
+      ierr = MPI_Group_free(&all_group);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_free(&inactive_with_root_group);
+      AssertThrowMPI(ierr);
 
       // Double check that the process with rank 0 in subgroup is active:
 #ifdef DEBUG
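
For context, the two group handles freed above are left over from splitting the communicator into an active part and an inactive-with-root part. A condensed sketch of that create/free lifecycle with every return code checked (hypothetical function name, assuming an MPI-enabled build of deal.II):

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>

    // Illustration only: obtain the group of a communicator and release
    // it again, checking the return code of each MPI call.
    void group_roundtrip(const MPI_Comm mpi_communicator)
    {
      MPI_Group all_group;
      int ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // ... a subgroup and a sub-communicator would be created here ...

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
    }
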
source/grid/grid_tools.cc
index 1bdc4a4401c071454494b39a238d4e31e1cf0f6c..90c431d3df80619db94fcbbf86ab307e34a8309e 100644 (file)
@@ -4621,9 +4621,10 @@ next_cell:
     std::vector<int> size_all_data(n_procs);
 
     // Exchanging the number of bboxes
-    MPI_Allgather(&n_local_data, 1, MPI_INT,
-                  &(size_all_data[0]), 1, MPI_INT,
-                  mpi_communicator);
+    int ierr = MPI_Allgather(&n_local_data, 1, MPI_INT,
+                             &(size_all_data[0]), 1, MPI_INT,
+                             mpi_communicator);
+    AssertThrowMPI(ierr);
 
     // Now computing the displacement, relative to recvbuf,
     // at which to store the incoming data
@@ -4636,9 +4637,10 @@ next_cell:
     // Allocating a vector to contain all the received data
     std::vector<double> data_array(rdispls.back() + size_all_data.back());
 
-    MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
-                   &(data_array[0]), &(size_all_data[0]),
-                   &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+    ierr = MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
+                          &(data_array[0]), &(size_all_data[0]),
+                          &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+    AssertThrowMPI(ierr);
 
     // Step 4: create the array of bboxes for output
     std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
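
The two collectives above are the standard variable-size all-gather: the per-process counts are exchanged with MPI_Allgather, the receive displacements are accumulated, and MPI_Allgatherv then collects the payload. A condensed sketch of that pattern with the new error checks (hypothetical function name, assuming an MPI-enabled build of deal.II):

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>
    #include <numeric>
    #include <vector>

    // Illustration only: gather a variable number of doubles from every rank.
    std::vector<double> gather_all(std::vector<double> local, const MPI_Comm comm)
    {
      int n_procs = 0;
      int ierr = MPI_Comm_size(comm, &n_procs);
      AssertThrowMPI(ierr);

      // Exchange the per-process counts.
      int              n_local = static_cast<int>(local.size());
      std::vector<int> counts(n_procs);
      ierr = MPI_Allgather(&n_local, 1, MPI_INT,
                           counts.data(), 1, MPI_INT, comm);
      AssertThrowMPI(ierr);

      // Receive displacements are the exclusive prefix sum of the counts.
      std::vector<int> displs(n_procs, 0);
      std::partial_sum(counts.begin(), counts.end() - 1, displs.begin() + 1);

      // Gather the actual data.
      std::vector<double> result(displs.back() + counts.back());
      ierr = MPI_Allgatherv(local.data(), n_local, MPI_DOUBLE,
                            result.data(), counts.data(), displs.data(),
                            MPI_DOUBLE, comm);
      AssertThrowMPI(ierr);

      return result;
    }
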
source/lac/scalapack.cc
index ae8cea06629a8793456fe7262672ab082a054b88..ec624995943241a75828c65c19dc2bae35ade739 100644 (file)
@@ -473,10 +473,16 @@ ScaLAPACKMatrix<NumberType>::copy_to (ScaLAPACKMatrix<NumberType> &dest) const
       Cblacs_gridexit(union_blacs_context);
 
       if (mpi_communicator_union != MPI_COMM_NULL)
-        MPI_Comm_free(&mpi_communicator_union);
-      MPI_Group_free(&group_source);
-      MPI_Group_free(&group_dest);
-      MPI_Group_free(&group_union);
+        {
+          ierr = MPI_Comm_free(&mpi_communicator_union);
+          AssertThrowMPI(ierr);
+        }
+      ierr = MPI_Group_free(&group_source);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_free(&group_dest);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_free(&group_union);
+      AssertThrowMPI(ierr);
     }
   else
     //process is active in the process grid
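
Note the new braces around the MPI_Comm_free above: mpi_communicator_union is MPI_COMM_NULL on processes that do not belong to the union, and the guarded body is now two statements (the call and its check), so it needs a block. A minimal sketch of that guarded release (hypothetical function name, assuming an MPI-enabled build of deal.II):

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>

    // Illustration only: free a communicator that may be MPI_COMM_NULL on
    // some ranks, checking the return code whenever the call is made.
    void free_if_valid(MPI_Comm &comm)
    {
      if (comm != MPI_COMM_NULL)
        {
          const int ierr = MPI_Comm_free(&comm);
          AssertThrowMPI(ierr);
        }
    }
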
@@ -1792,8 +1798,14 @@ void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,
 
   // gather the number of local rows and columns from all processes
   std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
-  MPI_Allgather(&tmp.n_local_rows,1,MPI_INT,proc_n_local_rows.data(),1,MPI_INT,tmp.grid->mpi_communicator);
-  MPI_Allgather(&tmp.n_local_columns,1,MPI_INT,proc_n_local_columns.data(),1,MPI_INT,tmp.grid->mpi_communicator);
+  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
+                           proc_n_local_rows.data(), 1, MPI_INT,
+                           tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
+                       proc_n_local_columns.data(), 1, MPI_INT,
+                       tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
 
   const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
 
@@ -1839,7 +1851,8 @@ void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,
 
   // before writing the state and property to file wait for
   // all processes to finish writing the matrix content to the file
-  MPI_Barrier(tmp.grid->mpi_communicator);
+  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
 
   // only root process will write state and property to the file
   if (tmp.grid->this_mpi_process==0)
@@ -2139,8 +2152,14 @@ void ScaLAPACKMatrix<NumberType>::load_parallel(const char *filename)
 
   // gather the number of local rows and columns from all processes
   std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
-  MPI_Allgather(&tmp.n_local_rows,1,MPI_INT,proc_n_local_rows.data(),1,MPI_INT,tmp.grid->mpi_communicator);
-  MPI_Allgather(&tmp.n_local_columns,1,MPI_INT,proc_n_local_columns.data(),1,MPI_INT,tmp.grid->mpi_communicator);
+  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
+                           proc_n_local_rows.data(), 1, MPI_INT,
+                           tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
+                       proc_n_local_columns.data(), 1, MPI_INT,
+                       tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
 
   const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
 
source/particles/particle_handler.cc
index 3bc443bbd9f829f4a194cbbf4615aa9ff1845ffe..e786eef43d9a3be5f582c407b0b4d34450ff3b14 100644 (file)
@@ -301,7 +301,10 @@ namespace Particles
 
 #ifdef DEAL_II_WITH_MPI
     types::particle_index particles_to_add_locally = positions.size();
-    MPI_Scan(&particles_to_add_locally, &local_start_index, 1, PARTICLE_INDEX_MPI_TYPE, MPI_SUM, triangulation->get_communicator());
+    const int ierr = MPI_Scan(&particles_to_add_locally, &local_start_index, 1,
+                              PARTICLE_INDEX_MPI_TYPE, MPI_SUM,
+                              triangulation->get_communicator());
+    AssertThrowMPI(ierr);
     local_start_index -= particles_to_add_locally;
 #endif
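
The checked MPI_Scan above computes an inclusive prefix sum of the locally added particle counts; subtracting the local count afterwards turns it into an exclusive prefix sum, i.e. the first global index available to this rank. A condensed sketch of that indexing trick (hypothetical function name, assuming an MPI-enabled build of deal.II and an unsigned long long index type):

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>

    // Illustration only: return the first global index this rank may use
    // when every rank contributes n_local new items.
    unsigned long long first_global_index(unsigned long long n_local,
                                          const MPI_Comm     comm)
    {
      unsigned long long inclusive_sum = 0;
      const int ierr = MPI_Scan(&n_local, &inclusive_sum, 1,
                                MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm);
      AssertThrowMPI(ierr);
      // The inclusive sum includes this rank's own contribution, so
      // subtract it to obtain the exclusive prefix sum.
      return inclusive_sum - n_local;
    }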
 
@@ -777,10 +780,19 @@ namespace Particles
     {
       std::vector<MPI_Request> n_requests(2*n_neighbors);
       for (unsigned int i=0; i<n_neighbors; ++i)
-        MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i], 0, triangulation->get_communicator(), &(n_requests[2*i]));
+        {
+          const int ierr = MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i],
+                                     0, triangulation->get_communicator(), &(n_requests[2*i]));
+          AssertThrowMPI(ierr);
+        }
       for (unsigned int i=0; i<n_neighbors; ++i)
-        MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i], 0, triangulation->get_communicator(), &(n_requests[2*i+1]));
-      MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+        {
+          const int ierr = MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i],
+                                     0, triangulation->get_communicator(), &(n_requests[2*i+1]));
+          AssertThrowMPI(ierr);
+        }
+      const int ierr = MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+      AssertThrowMPI(ierr);
     }
 
     // Determine how many particles and data we will receive
@@ -803,17 +815,24 @@ namespace Particles
       for (unsigned int i=0; i<n_neighbors; ++i)
         if (n_recv_data[i] > 0)
           {
-            MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR, neighbors[i], 1, triangulation->get_communicator(),&(requests[send_ops]));
+            const int ierr = MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR,
+                                       neighbors[i], 1, triangulation->get_communicator(),
+                                       &(requests[send_ops]));
+            AssertThrowMPI(ierr);
             send_ops++;
           }
 
       for (unsigned int i=0; i<n_neighbors; ++i)
         if (n_send_data[i] > 0)
           {
-            MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR, neighbors[i], 1, triangulation->get_communicator(),&(requests[send_ops+recv_ops]));
+            const int ierr = MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR,
+                                       neighbors[i], 1, triangulation->get_communicator(),
+                                       &(requests[send_ops+recv_ops]));
+            AssertThrowMPI(ierr);
             recv_ops++;
           }
-      MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+      const int ierr = MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+      AssertThrowMPI(ierr);
     }
 
     // Put the received particles into the domain if they are in the triangulation
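
Each MPI_Irecv, MPI_Isend, and the closing MPI_Waitall above now gets its own check; because every ierr is a const local in its own block, the same name can be reused throughout. A condensed sketch of the count-exchange pattern (hypothetical function and variable names, assuming an MPI-enabled build of deal.II):

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>
    #include <vector>

    // Illustration only: exchange one integer with each neighboring rank.
    void exchange_counts(const std::vector<int> &neighbors,
                         std::vector<int>       &n_send_data,
                         std::vector<int>       &n_recv_data,
                         const MPI_Comm          comm)
    {
      const unsigned int       n_neighbors = neighbors.size();
      std::vector<MPI_Request> requests(2 * n_neighbors);
      n_recv_data.resize(n_neighbors);

      for (unsigned int i = 0; i < n_neighbors; ++i)
        {
          const int ierr = MPI_Irecv(&n_recv_data[i], 1, MPI_INT, neighbors[i],
                                     0, comm, &requests[2 * i]);
          AssertThrowMPI(ierr);
        }
      for (unsigned int i = 0; i < n_neighbors; ++i)
        {
          const int ierr = MPI_Isend(&n_send_data[i], 1, MPI_INT, neighbors[i],
                                     0, comm, &requests[2 * i + 1]);
          AssertThrowMPI(ierr);
        }

      // All requests must complete before the buffers may be touched again.
      const int ierr = MPI_Waitall(2 * n_neighbors, requests.data(),
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
    }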
