https://gitweb.dealii.org/ - dealii.git/commitdiff
Check all MPI calls. 3517/head
author David Wells <wellsd2@rpi.edu>
Thu, 10 Nov 2016 00:08:40 +0000 (19:08 -0500)
committer David Wells <wellsd2@rpi.edu>
Fri, 11 Nov 2016 20:03:52 +0000 (15:03 -0500)
This commit adds AssertThrowMPI checks on the return code of every MPI
call in deal.II.
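
The pattern applied throughout the diff below is summarized here for reference.
This is an illustrative sketch only, not a line taken from the commit, and the
communicator variable is a placeholder:

    // Before: the return value of the MPI call was discarded.
    MPI_Barrier (mpi_communicator);

    // After: the return code is stored and checked, so a failing MPI call
    // raises an exception instead of failing silently.
    const int ierr = MPI_Barrier (mpi_communicator);
    AssertThrowMPI (ierr);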

26 files changed:
doc/news/changes.h
include/deal.II/base/mpi.templates.h
include/deal.II/lac/la_parallel_vector.templates.h
include/deal.II/matrix_free/mapping_info.templates.h
include/deal.II/matrix_free/matrix_free.templates.h
include/deal.II/multigrid/mg_transfer.templates.h
include/deal.II/numerics/vector_tools.templates.h
source/base/data_out_base.cc
source/base/index_set.cc
source/base/mpi.cc
source/base/partitioner.cc
source/base/timer.cc
source/base/utilities.cc
source/distributed/grid_refinement.cc
source/distributed/tria.cc
source/distributed/tria_base.cc
source/dofs/dof_handler_policy.cc
source/dofs/dof_renumbering.cc
source/dofs/dof_tools.cc
source/fe/fe_tools_extrapolate.cc
source/grid/grid_tools.cc
source/lac/petsc_matrix_base.cc
source/lac/petsc_parallel_vector.cc
source/lac/petsc_vector_base.cc
source/lac/sparsity_tools.cc
source/multigrid/mg_level_global_transfer.cc

index b4e8ea63bc3dfbe8207e86036ff84c99ac34d3ae..80ea3a440f8189752961797547b3f09290d8f44e 100644 (file)
@@ -215,6 +215,13 @@ inconvenience this causes.
 <h3>General</h3>
 
 <ol>
+<li> Improved: the error codes for all MPI functions are now checked and, if the
+     MPI function failed for any reason, an exception with a helpful message is
+     thrown.
+     <br>
+     (David Wells, 2016/11/09)
+</li>
+
  <li>
  Fixed: We have run the PVS static analysis checker on the entire code base,
  to see what possible problems it uncovers (see
@@ -466,7 +473,7 @@ inconvenience this causes.
  <br>
  (Rajat Arora, 2016/10/29)
  </li>
+
 <li> New: Add MatrixFreeOperators::MassOperator representing a mass matrix.
  <br>
  (Daniel Arndt, 2016/10/27)
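
For readers unfamiliar with the macro named in the changelog entry above, the
following is a hypothetical, simplified sketch of what an AssertThrowMPI-style
check could look like. It is not the deal.II implementation (which uses the
library's own exception machinery); it only assumes the standard
MPI_Error_string routine for turning an error code into a readable message:

    #include <mpi.h>
    #include <stdexcept>
    #include <string>

    // Hypothetical helper, for illustration only: convert a failing MPI
    // return code into an exception carrying the MPI error string.
    inline void check_mpi_error (const int error_code)
    {
      if (error_code != MPI_SUCCESS)
        {
          char error_string[MPI_MAX_ERROR_STRING];
          int  length = 0;
          MPI_Error_string (error_code, error_string, &length);
          throw std::runtime_error ("An MPI call failed with error code " +
                                    std::to_string (error_code) + ": " +
                                    std::string (error_string, length));
        }
    }

    #define AssertThrowMPI(error_code) check_mpi_error (error_code)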
index a5806cc217b1c9639be0b610daf14db60bb648fa..e67dc1fe87a31bc51a3ccc40b4f9c85b9194d5e6 100644 (file)
@@ -95,21 +95,23 @@ namespace Utilities
 #ifdef DEAL_II_WITH_MPI
         if (job_supports_mpi())
           {
-            MPI_Allreduce (values != output
-                           ?
-                           // TODO This const_cast is only needed for older
-                           // (e.g., openMPI 1.6, released in 2012)
-                           // implementations of MPI-2. It is not needed as of
-                           // MPI-3 and we should remove it at some point in
-                           // the future.
-                           const_cast<void *>(static_cast<const void *>(values))
-                           :
-                           MPI_IN_PLACE,
-                           static_cast<void *>(output),
-                           static_cast<int>(size),
-                           internal::mpi_type_id(values),
-                           mpi_op,
-                           mpi_communicator);
+            const int ierr = MPI_Allreduce
+                             (values != output
+                              ?
+                              // TODO This const_cast is only needed for older
+                              // (e.g., openMPI 1.6, released in 2012)
+                              // implementations of MPI-2. It is not needed as
+                              // of MPI-3 and we should remove it at some
+                              // point in the future.
+                              const_cast<void *>(static_cast<const void *>(values))
+                              :
+                              MPI_IN_PLACE,
+                              static_cast<void *>(output),
+                              static_cast<int>(size),
+                              internal::mpi_type_id(values),
+                              mpi_op,
+                              mpi_communicator);
+            AssertThrowMPI(ierr);
           }
         else
 #endif
@@ -132,21 +134,23 @@ namespace Utilities
         if (job_supports_mpi())
           {
             T dummy_selector;
-            MPI_Allreduce (values != output
-                           ?
-                           // TODO This const_cast is only needed for older
-                           // (e.g., openMPI 1.6, released in 2012)
-                           // implementations of MPI-2. It is not needed as of
-                           // MPI-3 and we should remove it at some point in
-                           // the future.
-                           const_cast<void *>(static_cast<const void *>(values))
-                           :
-                           MPI_IN_PLACE,
-                           static_cast<void *>(output),
-                           static_cast<int>(size*2),
-                           internal::mpi_type_id(&dummy_selector),
-                           mpi_op,
-                           mpi_communicator);
+            const int ierr = MPI_Allreduce
+                             (values != output
+                              ?
+                              // TODO This const_cast is only needed for older
+                              // (e.g., openMPI 1.6, released in 2012)
+                              // implementations of MPI-2. It is not needed as
+                              // of MPI-3 and we should remove it at some
+                              // point in the future.
+                              const_cast<void *>(static_cast<const void *>(values))
+                              :
+                              MPI_IN_PLACE,
+                              static_cast<void *>(output),
+                              static_cast<int>(size*2),
+                              internal::mpi_type_id(&dummy_selector),
+                              mpi_op,
+                              mpi_communicator);
+            AssertThrowMPI(ierr);
           }
         else
 #endif
index 1c3c48237d0909ee8f4aa6db9e671e7ee357a8ae..da622a1f75ec0e6c5c22b8be719a4d3a617bf0a0 100644 (file)
@@ -40,10 +40,16 @@ namespace LinearAlgebra
     {
 #ifdef DEAL_II_WITH_MPI
       for (size_type j=0; j<compress_requests.size(); j++)
-        MPI_Request_free(&compress_requests[j]);
+        {
+          const int ierr = MPI_Request_free(&compress_requests[j]);
+          AssertThrowMPI(ierr);
+        }
       compress_requests.clear();
       for (size_type j=0; j<update_ghost_values_requests.size(); j++)
-        MPI_Request_free(&update_ghost_values_requests[j]);
+        {
+          const int ierr = MPI_Request_free(&update_ghost_values_requests[j]);
+          AssertThrowMPI(ierr);
+        }
       update_ghost_values_requests.clear();
 #endif
     }
@@ -567,14 +573,15 @@ namespace LinearAlgebra
                            ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
                                       "The number of ghost entries times the size of 'Number' "
                                       "exceeds this value. This is not supported."));
-              MPI_Recv_init (&import_data[current_index_start],
-                             part.import_targets()[i].second*sizeof(Number),
-                             MPI_BYTE,
-                             part.import_targets()[i].first,
-                             part.import_targets()[i].first +
-                             part.n_mpi_processes()*channel,
-                             part.get_communicator(),
-                             &compress_requests[i]);
+              const int ierr = MPI_Recv_init (&import_data[current_index_start],
+                                              part.import_targets()[i].second*sizeof(Number),
+                                              MPI_BYTE,
+                                              part.import_targets()[i].first,
+                                              part.import_targets()[i].first +
+                                              part.n_mpi_processes()*channel,
+                                              part.get_communicator(),
+                                              &compress_requests[i]);
+              AssertThrowMPI (ierr);
               current_index_start += part.import_targets()[i].second;
             }
           AssertDimension(current_index_start, part.n_import_indices());
@@ -588,14 +595,15 @@ namespace LinearAlgebra
                            ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
                                       "The number of ghost entries times the size of 'Number' "
                                       "exceeds this value. This is not supported."));
-              MPI_Send_init (&this->val[current_index_start],
-                             part.ghost_targets()[i].second*sizeof(Number),
-                             MPI_BYTE,
-                             part.ghost_targets()[i].first,
-                             part.this_mpi_process() +
-                             part.n_mpi_processes()*channel,
-                             part.get_communicator(),
-                             &compress_requests[n_import_targets+i]);
+              const int ierr = MPI_Send_init (&this->val[current_index_start],
+                                              part.ghost_targets()[i].second*sizeof(Number),
+                                              MPI_BYTE,
+                                              part.ghost_targets()[i].first,
+                                              part.this_mpi_process() +
+                                              part.n_mpi_processes()*channel,
+                                              part.get_communicator(),
+                                              &compress_requests[n_import_targets+i]);
+              AssertThrowMPI (ierr);
               current_index_start += part.ghost_targets()[i].second;
             }
           AssertDimension (current_index_start,
@@ -606,9 +614,8 @@ namespace LinearAlgebra
                       compress_requests.size());
       if (compress_requests.size() > 0)
         {
-          int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]);
-          (void)ierr;
-          Assert (ierr == MPI_SUCCESS, ExcInternalError());
+          const int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]);
+          AssertThrowMPI(ierr);
         }
 #endif
     }
@@ -650,10 +657,9 @@ namespace LinearAlgebra
       // first wait for the receive to complete
       if (compress_requests.size() > 0 && n_import_targets > 0)
         {
-          int ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
-                                  MPI_STATUSES_IGNORE);
-          (void)ierr;
-          Assert (ierr == MPI_SUCCESS, ExcInternalError());
+          const int ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
+                                        MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
 
           Number *read_position = import_data;
           std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
@@ -682,11 +688,10 @@ namespace LinearAlgebra
 
       if (compress_requests.size() > 0 && n_ghost_targets > 0)
         {
-          int ierr = MPI_Waitall (n_ghost_targets,
-                                  &compress_requests[n_import_targets],
-                                  MPI_STATUSES_IGNORE);
-          (void)ierr;
-          Assert (ierr == MPI_SUCCESS, ExcInternalError());
+          const int ierr = MPI_Waitall (n_ghost_targets,
+                                        &compress_requests[n_import_targets],
+                                        MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
         }
       else
         AssertDimension (part.n_ghost_indices(), 0);
@@ -727,14 +732,15 @@ namespace LinearAlgebra
             {
               // allow writing into ghost indices even though we are in a
               // const function
-              MPI_Recv_init (const_cast<Number *>(&val[current_index_start]),
-                             part.ghost_targets()[i].second*sizeof(Number),
-                             MPI_BYTE,
-                             part.ghost_targets()[i].first,
-                             part.ghost_targets()[i].first +
-                             counter*part.n_mpi_processes(),
-                             part.get_communicator(),
-                             &update_ghost_values_requests[i]);
+              const int ierr = MPI_Recv_init (const_cast<Number *>(&val[current_index_start]),
+                                              part.ghost_targets()[i].second*sizeof(Number),
+                                              MPI_BYTE,
+                                              part.ghost_targets()[i].first,
+                                              part.ghost_targets()[i].first +
+                                              counter*part.n_mpi_processes(),
+                                              part.get_communicator(),
+                                              &update_ghost_values_requests[i]);
+              AssertThrowMPI (ierr);
               current_index_start += part.ghost_targets()[i].second;
             }
           AssertDimension (current_index_start,
@@ -746,13 +752,14 @@ namespace LinearAlgebra
           current_index_start = 0;
           for (unsigned int i=0; i<n_import_targets; i++)
             {
-              MPI_Send_init (&import_data[current_index_start],
-                             part.import_targets()[i].second*sizeof(Number),
-                             MPI_BYTE, part.import_targets()[i].first,
-                             part.this_mpi_process() +
-                             part.n_mpi_processes()*counter,
-                             part.get_communicator(),
-                             &update_ghost_values_requests[n_ghost_targets+i]);
+              const int ierr = MPI_Send_init (&import_data[current_index_start],
+                                              part.import_targets()[i].second*sizeof(Number),
+                                              MPI_BYTE, part.import_targets()[i].first,
+                                              part.this_mpi_process() +
+                                              part.n_mpi_processes()*counter,
+                                              part.get_communicator(),
+                                              &update_ghost_values_requests[n_ghost_targets+i]);
+              AssertThrowMPI (ierr);
               current_index_start += part.import_targets()[i].second;
             }
           AssertDimension (current_index_start, part.n_import_indices());
@@ -774,10 +781,9 @@ namespace LinearAlgebra
                        update_ghost_values_requests.size());
       if (update_ghost_values_requests.size() > 0)
         {
-          int ierr = MPI_Startall(update_ghost_values_requests.size(),
-                                  &update_ghost_values_requests[0]);
-          (void)ierr;
-          Assert (ierr == MPI_SUCCESS, ExcInternalError());
+          const int ierr = MPI_Startall(update_ghost_values_requests.size(),
+                                        &update_ghost_values_requests[0]);
+          AssertThrowMPI(ierr);
         }
 #else
       (void)counter;
@@ -801,11 +807,10 @@ namespace LinearAlgebra
           // make this function thread safe
           Threads::Mutex::ScopedLock lock (mutex);
 
-          int ierr = MPI_Waitall (update_ghost_values_requests.size(),
-                                  &update_ghost_values_requests[0],
-                                  MPI_STATUSES_IGNORE);
-          (void)ierr;
-          Assert (ierr == MPI_SUCCESS, ExcInternalError());
+          const int ierr = MPI_Waitall (update_ghost_values_requests.size(),
+                                        &update_ghost_values_requests[0],
+                                        MPI_STATUSES_IGNORE);
+          AssertThrowMPI (ierr);
         }
 #endif
       vector_is_ghosted = true;
@@ -869,19 +874,19 @@ namespace LinearAlgebra
           int flag = 1;
           if (update_ghost_values_requests.size()>0)
             {
-              int ierr = MPI_Testall (update_ghost_values_requests.size(),
-                                      &update_ghost_values_requests[0],
-                                      &flag, MPI_STATUSES_IGNORE);
-              Assert (ierr == MPI_SUCCESS, ExcInternalError());
+              const int ierr = MPI_Testall (update_ghost_values_requests.size(),
+                                            &update_ghost_values_requests[0],
+                                            &flag, MPI_STATUSES_IGNORE);
+              AssertThrowMPI (ierr);
               Assert (flag == 1,
                       ExcMessage("MPI found unfinished update_ghost_values() requests"
                                  "when calling swap, which is not allowed"));
             }
           if (compress_requests.size()>0)
             {
-              int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0],
-                                      &flag, MPI_STATUSES_IGNORE);
-              Assert (ierr == MPI_SUCCESS, ExcInternalError());
+              const int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0],
+                                            &flag, MPI_STATUSES_IGNORE);
+              AssertThrowMPI (ierr);
               Assert (flag == 1,
                       ExcMessage("MPI found unfinished compress() requests "
                                  "when calling swap, which is not allowed"));
@@ -1533,7 +1538,10 @@ namespace LinearAlgebra
 #ifdef DEAL_II_WITH_MPI
       if (partitioner->n_mpi_processes() > 1)
         for (unsigned int i=0; i<partitioner->this_mpi_process(); i++)
-          MPI_Barrier (partitioner->get_communicator());
+          {
+            const int ierr = MPI_Barrier (partitioner->get_communicator());
+            AssertThrowMPI (ierr);
+          }
 #endif
 
       out << "Process #" << partitioner->this_mpi_process() << std::endl
@@ -1568,11 +1576,15 @@ namespace LinearAlgebra
 #ifdef DEAL_II_WITH_MPI
       if (partitioner->n_mpi_processes() > 1)
         {
-          MPI_Barrier (partitioner->get_communicator());
+          int ierr = MPI_Barrier (partitioner->get_communicator());
+          AssertThrowMPI (ierr);
 
           for (unsigned int i=partitioner->this_mpi_process()+1;
                i<partitioner->n_mpi_processes(); i++)
-            MPI_Barrier (partitioner->get_communicator());
+            {
+              ierr = MPI_Barrier (partitioner->get_communicator());
+              AssertThrowMPI (ierr);
+            }
         }
 #endif
 
index 7923a53a6fca3e0b1a0709884f3598c1865de000..82ce6b40a432c672546ee6a3666951aa88c36c8d 100644 (file)
@@ -866,8 +866,9 @@ namespace internal
       // disable the check here only if no processor has any such data
 #ifdef DEAL_II_WITH_MPI
       unsigned int general_size_glob = 0, general_size_loc = jacobians.size();
-      MPI_Allreduce (&general_size_loc, &general_size_glob, 1, MPI_UNSIGNED,
-                     MPI_MAX, size_info.communicator);
+      int ierr = MPI_Allreduce (&general_size_loc, &general_size_glob, 1,
+                                MPI_UNSIGNED, MPI_MAX, size_info.communicator);
+      AssertThrowMPI (ierr);
 #else
       unsigned int general_size_glob = jacobians.size();
 #endif
@@ -885,8 +886,9 @@ namespace internal
 
 #ifdef DEAL_II_WITH_MPI
       unsigned int quad_size_glob = 0, quad_size_loc = quadrature_points.size();
-      MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED,
-                     MPI_MAX, size_info.communicator);
+      ierr = MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED,
+                            MPI_MAX, size_info.communicator);
+      AssertThrowMPI (ierr);
 #else
       unsigned int quad_size_glob = quadrature_points.size();
 #endif
index 72de1384445746dd478dc3f8d0740df53bdc4c5e..c6dfab2e8a767899022ceb9cd48c95e3f292a1db 100644 (file)
@@ -85,8 +85,9 @@ namespace internal
         if (Utilities::MPI::job_supports_mpi())
           {
             int communicators_same = 0;
-            MPI_Comm_compare (dist_tria->get_communicator(), comm_mf,
-                              &communicators_same);
+            const int ierr = MPI_Comm_compare (dist_tria->get_communicator(), comm_mf,
+                                               &communicators_same);
+            AssertThrowMPI (ierr);
             Assert (communicators_same == MPI_IDENT ||
                     communicators_same == MPI_CONGRUENT,
                     ExcMessage ("MPI communicator in parallel::distributed::Triangulation "
index 143cbbefc65de131e0f8e5111d5eefe15cc4ebed..8b525c4e90bb5aa144f296e7b46f81bd2a893964 100644 (file)
@@ -203,7 +203,8 @@ MGLevelGlobalTransfer<VectorType>::copy_to_mg
   reinit_vector(mg_dof_handler, component_to_block_map, dst);
 #ifdef DEBUG_OUTPUT
   std::cout << "copy_to_mg src " << src.l2_norm() << std::endl;
-  MPI_Barrier(MPI_COMM_WORLD);
+  int ierr = MPI_Barrier(MPI_COMM_WORLD);
+  AssertThrowMPI(ierr);
 #endif
 
   if (perform_plain_copy)
@@ -220,7 +221,8 @@ MGLevelGlobalTransfer<VectorType>::copy_to_mg
     {
       --level;
 #ifdef DEBUG_OUTPUT
-      MPI_Barrier(MPI_COMM_WORLD);
+      ierr = MPI_Barrier(MPI_COMM_WORLD);
+      AssertThrowMPI(ierr);
 #endif
 
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
@@ -240,7 +242,8 @@ MGLevelGlobalTransfer<VectorType>::copy_to_mg
       dst_level.compress(VectorOperation::insert);
 
 #ifdef DEBUG_OUTPUT
-      MPI_Barrier(MPI_COMM_WORLD);
+      ierr = MPI_Barrier(MPI_COMM_WORLD);
+      AssertThrowMPI(ierr);
       std::cout << "copy_to_mg dst " << level << " " << dst_level.l2_norm() << std::endl;
 #endif
     }
@@ -273,9 +276,11 @@ MGLevelGlobalTransfer<VectorType>::copy_from_mg
   for (unsigned int level=src.min_level(); level<=src.max_level(); ++level)
     {
 #ifdef DEBUG_OUTPUT
-      MPI_Barrier(MPI_COMM_WORLD);
+      int ierr = MPI_Barrier(MPI_COMM_WORLD);
+      AssertThrowMPI(ierr);
       std::cout << "copy_from_mg src " << level << " " << src[level].l2_norm() << std::endl;
-      MPI_Barrier(MPI_COMM_WORLD);
+      ierr = MPI_Barrier(MPI_COMM_WORLD);
+      AssertThrowMPI(ierr);
 #endif
 
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
@@ -295,14 +300,16 @@ MGLevelGlobalTransfer<VectorType>::copy_from_mg
 #ifdef DEBUG_OUTPUT
       {
         dst.compress(VectorOperation::insert);
-        MPI_Barrier(MPI_COMM_WORLD);
+        ierr = MPI_Barrier(MPI_COMM_WORLD);
+        AssertThrowMPI(ierr);
         std::cout << "copy_from_mg level=" << level << " " << dst.l2_norm() << std::endl;
       }
 #endif
     }
   dst.compress(VectorOperation::insert);
 #ifdef DEBUG_OUTPUT
-  MPI_Barrier(MPI_COMM_WORLD);
+  const int ierr = MPI_Barrier(MPI_COMM_WORLD);
+  AssertThrowMPI(ierr);
   std::cout << "copy_from_mg " << dst.l2_norm() << std::endl;
 #endif
 }
index e277a25b89a0734315b449f6069867d2a5c0db42..6f04aa2ace457292f610d8536da7731a646f9b93 100644 (file)
@@ -7607,9 +7607,10 @@ namespace VectorTools
         double my_values[3] = { mean_double.real(), mean_double.imag(), area };
         double global_values[3];
 
-        MPI_Allreduce (my_values, global_values, 3, MPI_DOUBLE,
-                       MPI_SUM,
-                       p_triangulation->get_communicator());
+        const int ierr = MPI_Allreduce (my_values, global_values, 3, MPI_DOUBLE,
+                                        MPI_SUM,
+                                        p_triangulation->get_communicator());
+        AssertThrowMPI (ierr);
 
         set_possibly_complex_number(global_values[0], global_values[1],
                                     mean);
index ad6c48e0d58c79e78a36bb8c6bd759efb95c2bc7..f137ec11c7d34bc5683aa701dd1e2601207ae57c 100644 (file)
@@ -6156,28 +6156,28 @@ void DataOutInterface<dim,spacedim>::write_vtu_in_parallel (const char *filename
   write_vtu (f);
 #else
 
-  int myrank, nproc, err;
-  MPI_Comm_rank(comm, &myrank);
-  MPI_Comm_size(comm, &nproc);
+  int myrank, nproc;
+  int ierr = MPI_Comm_rank(comm, &myrank);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Comm_size(comm, &nproc);
+  AssertThrowMPI(ierr);
 
   MPI_Info info;
-  MPI_Info_create(&info);
+  ierr = MPI_Info_create(&info);
+  AssertThrowMPI(ierr);
   MPI_File fh;
-  err = MPI_File_open(comm, const_cast<char *>(filename),
-                      MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
-  AssertThrow(err==0,
-              ExcMessage("Unable to open file <"
-                         + std::string(filename) +
-                         "> with MPI_File_open. The error code "
-                         "returned was "
-                         + Utilities::to_string(err) + "."));
+  ierr = MPI_File_open(comm, const_cast<char *>(filename),
+                       MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
+  AssertThrowMPI(ierr);
 
-
-  MPI_File_set_size(fh, 0); // delete the file contents
+  ierr = MPI_File_set_size(fh, 0); // delete the file contents
+  AssertThrowMPI(ierr);
   // this barrier is necessary, because otherwise others might already
   // write while one core is still setting the size to zero.
-  MPI_Barrier(comm);
-  MPI_Info_free(&info);
+  ierr = MPI_Barrier(comm);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Info_free(&info);
+  AssertThrowMPI(ierr);
 
   unsigned int header_size;
 
@@ -6187,18 +6187,24 @@ void DataOutInterface<dim,spacedim>::write_vtu_in_parallel (const char *filename
       std::stringstream ss;
       DataOutBase::write_vtu_header(ss, vtk_flags);
       header_size = ss.str().size();
-      MPI_File_write(fh, const_cast<char *>(ss.str().c_str()), header_size, MPI_CHAR, MPI_STATUS_IGNORE);
+      ierr = MPI_File_write(fh, const_cast<char *>(ss.str().c_str()), header_size,
+                            MPI_CHAR, MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
     }
 
-  MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm);
+  ierr = MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm);
+  AssertThrowMPI(ierr);
 
-  MPI_File_seek_shared( fh, header_size, MPI_SEEK_SET );
+  ierr = MPI_File_seek_shared( fh, header_size, MPI_SEEK_SET );
+  AssertThrowMPI(ierr);
   {
     std::stringstream ss;
     DataOutBase::write_vtu_main (get_patches(), get_dataset_names(),
                                  get_vector_data_ranges(),
                                  vtk_flags, ss);
-    MPI_File_write_ordered(fh, const_cast<char *>(ss.str().c_str()), ss.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
+    ierr = MPI_File_write_ordered(fh, const_cast<char *>(ss.str().c_str()),
+                                  ss.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
+    AssertThrowMPI(ierr);
   }
 
   //write footer
@@ -6207,9 +6213,12 @@ void DataOutInterface<dim,spacedim>::write_vtu_in_parallel (const char *filename
       std::stringstream ss;
       DataOutBase::write_vtu_footer(ss);
       unsigned int footer_size = ss.str().size();
-      MPI_File_write_shared(fh, const_cast<char *>(ss.str().c_str()), footer_size, MPI_CHAR, MPI_STATUS_IGNORE);
+      ierr = MPI_File_write_shared(fh, const_cast<char *>(ss.str().c_str()),
+                                   footer_size, MPI_CHAR, MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
     }
-  MPI_File_close( &fh );
+  ierr = MPI_File_close( &fh );
+  AssertThrowMPI(ierr);
 #endif
 }
 
@@ -6469,8 +6478,10 @@ create_xdmf_entry (const DataOutBase::DataOutFilter &data_filter,
 
   // And compute the global total
 #ifdef DEAL_II_WITH_MPI
-  MPI_Comm_rank(comm, &myrank);
-  MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
+  int ierr = MPI_Comm_rank(comm, &myrank);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
+  AssertThrowMPI(ierr);
 #else
   myrank = 0;
   global_node_cell_count[0] = local_node_cell_count[0];
@@ -6507,7 +6518,8 @@ write_xdmf_file (const std::vector<XDMFEntry> &entries,
   int             myrank;
 
 #ifdef DEAL_II_WITH_MPI
-  MPI_Comm_rank(comm, &myrank);
+  const int ierr = MPI_Comm_rank(comm, &myrank);
+  AssertThrowMPI(ierr);
 #else
   (void)comm;
   myrank = 0;
@@ -6737,6 +6749,8 @@ void DataOutBase::write_hdf5_parallel (const std::vector<Patch<dim,spacedim> > &
                                        const std::string &solution_filename,
                                        MPI_Comm comm)
 {
+  int ierr;
+  (void)ierr;
 #ifndef DEAL_II_WITH_HDF5
   // throw an exception, but first make
   // sure the compiler does not warn about
@@ -6778,7 +6792,8 @@ void DataOutBase::write_hdf5_parallel (const std::vector<Patch<dim,spacedim> > &
 #ifndef H5_HAVE_PARALLEL
 #  ifdef DEAL_II_WITH_MPI
   int world_size;
-  MPI_Comm_size(comm, &world_size);
+  ierr = MPI_Comm_size(comm, &world_size);
+  AssertThrowMPI(ierr);
   AssertThrow (world_size <= 1,
                ExcMessage ("Serial HDF5 output on multiple processes is not yet supported."));
 #  endif
@@ -6802,8 +6817,10 @@ void DataOutBase::write_hdf5_parallel (const std::vector<Patch<dim,spacedim> > &
   // Compute the global total number of nodes/cells
   // And determine the offset of the data for this process
 #ifdef DEAL_II_WITH_MPI
-  MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
-  MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm);
+  ierr = MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm);
+  AssertThrowMPI(ierr);
   global_node_cell_offsets[0] -= local_node_cell_count[0];
   global_node_cell_offsets[1] -= local_node_cell_count[1];
 #else
index 6126766db58fd0ea8ccfeb519a1639d69944ed8d..778384dc5067f32b2563d8c622e72e2a559e525f 100644 (file)
@@ -578,9 +578,11 @@ IndexSet::is_ascending_and_one_to_one (const MPI_Comm &communicator) const
   const unsigned int gather_size = (my_rank==0)?n_ranks:1;
   std::vector<types::global_dof_index> global_dofs(gather_size);
 
-  MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
-             &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
-             communicator);
+  int ierr = MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+                        &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
+                        communicator);
+  AssertThrowMPI(ierr);
+
   if (my_rank == 0)
     {
       // find out if the received std::vector is ascending
@@ -604,7 +606,8 @@ IndexSet::is_ascending_and_one_to_one (const MPI_Comm &communicator) const
 
   // now broadcast the result
   int is_ascending = is_globally_ascending ? 1 : 0;
-  MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
+  ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
+  AssertThrowMPI(ierr);
 
   return (is_ascending==1);
 #else
index 2e3d95a7fd89d09cb2ff059844f4363f39deb035..3077c4d98073587bae99fd70bceccd370fd625f2 100644 (file)
@@ -81,7 +81,8 @@ namespace Utilities
     unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
     {
       int n_jobs=1;
-      (void) MPI_Comm_size (mpi_communicator, &n_jobs);
+      const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs);
+      AssertThrowMPI(ierr);
 
       return n_jobs;
     }
@@ -90,7 +91,8 @@ namespace Utilities
     unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
     {
       int rank=0;
-      (void) MPI_Comm_rank (mpi_communicator, &rank);
+      const int ierr = MPI_Comm_rank (mpi_communicator, &rank);
+      AssertThrowMPI(ierr);
 
       return rank;
     }
@@ -99,7 +101,8 @@ namespace Utilities
     MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
     {
       MPI_Comm new_communicator;
-      MPI_Comm_dup (mpi_communicator, &new_communicator);
+      const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator);
+      AssertThrowMPI(ierr);
       return new_communicator;
     }
 
@@ -142,9 +145,10 @@ namespace Utilities
       // processors in this case, which is more expensive than the reduction
       // operation above in MPI_Allreduce)
       std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
-      MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
-                     &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
-                     mpi_comm);
+      const int ierr = MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+                                      &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+                                      mpi_comm);
+      AssertThrowMPI(ierr);
 
       // now we know who is going to communicate with whom. collect who is
       // going to communicate with us!
@@ -236,7 +240,7 @@ namespace Utilities
 
       MPI_Op op;
       int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+      AssertThrowMPI(ierr);
 
       MinMaxAvg in;
       in.sum = in.min = in.max = my_value;
@@ -248,18 +252,18 @@ namespace Utilities
       MPI_Datatype types[]= {MPI_DOUBLE, MPI_INT};
 
       ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+      AssertThrowMPI(ierr);
 
       ierr = MPI_Type_commit(&type);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+      AssertThrowMPI(ierr);
       ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+      AssertThrowMPI(ierr);
 
       ierr = MPI_Type_free (&type);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+      AssertThrowMPI(ierr);
 
       ierr = MPI_Op_free(&op);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+      AssertThrowMPI(ierr);
 
       result.avg = result.sum / numproc;
 
@@ -324,19 +328,19 @@ namespace Utilities
       // if we have PETSc, we will initialize it and let it handle MPI.
       // Otherwise, we will do it.
       int MPI_has_been_started = 0;
-      MPI_Initialized(&MPI_has_been_started);
+      int ierr = MPI_Initialized(&MPI_has_been_started);
+      AssertThrowMPI(ierr);
       AssertThrow (MPI_has_been_started == 0,
                    ExcMessage ("MPI error. You can only start MPI once!"));
 
-      int mpi_err, provided;
-      // this works like mpi_err = MPI_Init (&argc, &argv); but tells MPI that
+      int provided;
+      // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
       // we might use several threads but never call two MPI functions at the
       // same time. For an explanation see on why we do this see
       // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
       int wanted = MPI_THREAD_SERIALIZED;
-      mpi_err = MPI_Init_thread(&argc, &argv, wanted, &provided);
-      AssertThrow (mpi_err == 0,
-                   ExcMessage ("MPI could not be initialized."));
+      ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
+      AssertThrowMPI(ierr);
 
       // disable for now because at least some implementations always return
       // MPI_THREAD_SINGLE.
@@ -397,9 +401,10 @@ namespace Utilities
 
           std::vector<char> all_hostnames(max_hostname_size *
                                           MPI::n_mpi_processes(MPI_COMM_WORLD));
-          MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
-                         &all_hostnames[0], max_hostname_size, MPI_CHAR,
-                         MPI_COMM_WORLD);
+          const int ierr = MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
+                                        &all_hostnames[0], max_hostname_size, MPI_CHAR,
+                                        MPI_COMM_WORLD);
+          AssertThrowMPI(ierr);
 
           // search how often our own hostname appears and the how-manyth
           // instance the current process represents
@@ -517,9 +522,8 @@ namespace Utilities
             }
           else
             {
-              const int mpi_err = MPI_Finalize();
-              AssertThrow (mpi_err == 0,
-                           ExcMessage ("An error occurred while calling MPI_Finalize()"));
+              const int ierr = MPI_Finalize();
+              AssertThrowMPI(ierr);
             }
         }
 #endif
@@ -531,7 +535,8 @@ namespace Utilities
     {
 #ifdef DEAL_II_WITH_MPI
       int MPI_has_been_started = 0;
-      MPI_Initialized(&MPI_has_been_started);
+      const int ierr = MPI_Initialized(&MPI_has_been_started);
+      AssertThrowMPI(ierr);
 
       return (MPI_has_been_started > 0);
 #else
index abeae873e4aff05462c5dac065e6512d9f614a07..05ba75a923a2985db7ab64ba5c7f3b0d437581fd 100644 (file)
@@ -183,13 +183,15 @@ namespace Utilities
       // Allow non-zero start index for the vector. send this data to all
       // processors
       first_index[0] = local_range_data.first;
-      MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE,
-                0, communicator);
+      int ierr = MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+                           0, communicator);
+      AssertThrowMPI(ierr);
 
       // Get the end-of-local_range for all processors
-      MPI_Allgather(&local_range_data.second, 1,
-                    DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1,
-                    DEAL_II_DOF_INDEX_MPI_TYPE, communicator);
+      ierr = MPI_Allgather(&local_range_data.second, 1,
+                           DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1,
+                           DEAL_II_DOF_INDEX_MPI_TYPE, communicator);
+      AssertThrowMPI(ierr);
       first_index[n_procs] = global_size;
 
       // fix case when there are some processors without any locally owned
@@ -261,8 +263,9 @@ namespace Utilities
         for (unsigned int i=0; i<n_ghost_targets; i++)
           send_buffer[ghost_targets_data[i].first] = ghost_targets_data[i].second;
 
-        MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
-                      MPI_INT, communicator);
+        const int ierr = MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
+                                       MPI_INT, communicator);
+        AssertThrowMPI(ierr);
 
         // allocate memory for import data
         std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
@@ -285,11 +288,13 @@ namespace Utilities
         std::vector<MPI_Request> import_requests (import_targets_data.size());
         for (unsigned int i=0; i<import_targets_data.size(); i++)
           {
-            MPI_Irecv (&expanded_import_indices[current_index_start],
-                       import_targets_data[i].second,
-                       DEAL_II_DOF_INDEX_MPI_TYPE,
-                       import_targets_data[i].first, import_targets_data[i].first,
-                       communicator, &import_requests[i]);
+            const int ierr = MPI_Irecv (&expanded_import_indices[current_index_start],
+                                        import_targets_data[i].second,
+                                        DEAL_II_DOF_INDEX_MPI_TYPE,
+                                        import_targets_data[i].first,
+                                        import_targets_data[i].first,
+                                        communicator, &import_requests[i]);
+            AssertThrowMPI(ierr);
             current_index_start += import_targets_data[i].second;
           }
         AssertDimension (current_index_start, n_import_indices_data);
@@ -298,17 +303,22 @@ namespace Utilities
         current_index_start = 0;
         for (unsigned int i=0; i<n_ghost_targets; i++)
           {
-            MPI_Send (&expanded_ghost_indices[current_index_start],
-                      ghost_targets_data[i].second, DEAL_II_DOF_INDEX_MPI_TYPE,
-                      ghost_targets_data[i].first, my_pid,
-                      communicator);
+            const int ierr = MPI_Send (&expanded_ghost_indices[current_index_start],
+                                       ghost_targets_data[i].second, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                       ghost_targets_data[i].first, my_pid,
+                                       communicator);
+            AssertThrowMPI(ierr);
             current_index_start += ghost_targets_data[i].second;
           }
         AssertDimension (current_index_start, n_ghost_indices_data);
 
         if (import_requests.size()>0)
-          MPI_Waitall (import_requests.size(), &import_requests[0],
-                       MPI_STATUSES_IGNORE);
+          {
+            const int ierr = MPI_Waitall (import_requests.size(),
+                                          &import_requests[0],
+                                          MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
+          }
 
         // transform import indices to local index space and compress
         // contiguous indices in form of ranges
@@ -363,8 +373,9 @@ namespace Utilities
       if (Utilities::MPI::job_supports_mpi())
         {
           int communicators_same = 0;
-          MPI_Comm_compare (part.communicator, communicator,
-                            &communicators_same);
+          const int ierr = MPI_Comm_compare (part.communicator, communicator,
+                                             &communicators_same);
+          AssertThrowMPI(ierr);
           if (!(communicators_same == MPI_IDENT ||
                 communicators_same == MPI_CONGRUENT))
             return false;
index a9f0f7e401adb22500f886ee88753fe01d46d1db..91a92d3fa81ee3fbe369b385422fe8f6a6c64447 100644 (file)
@@ -116,7 +116,10 @@ void Timer::start ()
 
 #ifdef DEAL_II_WITH_MPI
   if (sync_wall_time)
-    MPI_Barrier(mpi_communicator);
+    {
+      const int ierr = MPI_Barrier(mpi_communicator);
+      AssertThrowMPI(ierr);
+    }
 #endif
 
 #if defined(DEAL_II_HAVE_SYS_TIME_H) && defined(DEAL_II_HAVE_SYS_RESOURCE_H)
index 66055ba1ab4f9c8436545d0049b81a3c322a684b..c361bde0f10b6d625cd6acaef0d1ce31c2bf3ef6 100644 (file)
@@ -784,7 +784,8 @@ namespace Utilities
         {
           MPI_Comm comm = mpi_comm->GetMpiComm();
           *mpi_comm = Epetra_MpiComm(MPI_COMM_SELF);
-          MPI_Comm_free (&comm);
+          const int ierr = MPI_Comm_free (&comm);
+          AssertThrowMPI(ierr);
         }
 #endif
     }
index 32fe0de9dc1d34c4f4f4c8a0260e4c9709aa2cca..3c43d26ce89e3cd93be33174c20972b1d25846a8 100644 (file)
@@ -97,8 +97,9 @@ namespace
 
     // compute the minimum on
     // processor zero
-    MPI_Reduce (comp, result, 2, MPI_DOUBLE,
-                MPI_MIN, 0, mpi_communicator);
+    const int ierr = MPI_Reduce (comp, result, 2, MPI_DOUBLE,
+                                 MPI_MIN, 0, mpi_communicator);
+    AssertThrowMPI(ierr);
 
     // make sure only processor zero
     // got something
@@ -131,8 +132,9 @@ namespace
     double result = 0;
     // compute the minimum on
     // processor zero
-    MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE,
-                MPI_SUM, 0, mpi_communicator);
+    const int ierr = MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE,
+                                 MPI_SUM, 0, mpi_communicator);
+    AssertThrowMPI(ierr);
 
     // make sure only processor zero
     // got something
@@ -274,8 +276,9 @@ namespace
 
       do
         {
-          MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
-                     master_mpi_rank, mpi_communicator);
+          int ierr = MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+                                master_mpi_rank, mpi_communicator);
+          AssertThrowMPI(ierr);
 
           if (interesting_range[0] == interesting_range[1])
             return interesting_range[0];
@@ -300,8 +303,9 @@ namespace
                                                      test_threshold));
 
           unsigned int total_count;
-          MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED,
-                      MPI_SUM, master_mpi_rank, mpi_communicator);
+          ierr = MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED,
+                             MPI_SUM, master_mpi_rank, mpi_communicator);
+          AssertThrowMPI(ierr);
 
           // now adjust the range. if
           // we have to many cells, we
@@ -369,8 +373,9 @@ namespace
 
       do
         {
-          MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
-                     master_mpi_rank, mpi_communicator);
+          int ierr = MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+                                master_mpi_rank, mpi_communicator);
+          AssertThrowMPI(ierr);
 
           if (interesting_range[0] == interesting_range[1])
             {
@@ -384,8 +389,9 @@ namespace
               // actual largest value
               double final_threshold =  std::min (interesting_range[0],
                                                   global_min_and_max.second);
-              MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
-                         master_mpi_rank, mpi_communicator);
+              ierr = MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
+                                master_mpi_rank, mpi_communicator);
+              AssertThrowMPI(ierr);
 
               return final_threshold;
             }
@@ -406,8 +412,9 @@ namespace
               my_error += criteria(i);
 
           double total_error;
-          MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE,
-                      MPI_SUM, master_mpi_rank, mpi_communicator);
+          ierr = MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE,
+                             MPI_SUM, master_mpi_rank, mpi_communicator);
+          AssertThrowMPI(ierr);
 
           // now adjust the range. if we have to many cells, we take
           // the upper half of the previous range, otherwise the lower
index f277ad2cce1c4f63198e93f2337005c03d82dae9..ca1f0541213e18315d988ee7c49e2fb645768300 100644 (file)
@@ -1927,8 +1927,8 @@ namespace parallel
           // Check that level_ghost_owners is symmetric by sending a message
           // to everyone
           {
-
-            MPI_Barrier(this->mpi_communicator);
+            int ierr = MPI_Barrier(this->mpi_communicator);
+            AssertThrowMPI(ierr);
 
             // important: preallocate to avoid (re)allocation:
             std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size());
@@ -1942,9 +1942,10 @@ namespace parallel
                 Assert (typeid(types::subdomain_id)
                         == typeid(unsigned int),
                         ExcNotImplemented());
-                MPI_Isend(&dummy, 1, MPI_UNSIGNED,
-                          *it, 9001, this->mpi_communicator,
-                          &requests[req_counter]);
+                ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED,
+                                 *it, 9001, this->mpi_communicator,
+                                 &requests[req_counter]);
+                AssertThrowMPI(ierr);
               }
 
             for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
@@ -1955,15 +1956,20 @@ namespace parallel
                         == typeid(unsigned int),
                         ExcNotImplemented());
                 unsigned int dummy;
-                MPI_Recv(&dummy, 1, MPI_UNSIGNED,
-                         *it, 9001, this->mpi_communicator,
-                         MPI_STATUS_IGNORE);
+                ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED,
+                                *it, 9001, this->mpi_communicator,
+                                MPI_STATUS_IGNORE);
+                AssertThrowMPI(ierr);
               }
 
             if (requests.size() > 0)
-              MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+              {
+                ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+                AssertThrowMPI(ierr);
+              }
 
-            MPI_Barrier(this->mpi_communicator);
+            ierr = MPI_Barrier(this->mpi_communicator);
+            AssertThrowMPI(ierr);
           }
 #endif
 
@@ -3198,9 +3204,10 @@ namespace parallel
           // that the packet has been
           // received
           it->second.pack_data (*buffer);
-          MPI_Isend(&(*buffer)[0], buffer->size(),
-                    MPI_BYTE, it->first,
-                    123, this->get_communicator(), &requests[idx]);
+          const int ierr = MPI_Isend(&(*buffer)[0], buffer->size(),
+                                     MPI_BYTE, it->first,
+                                     123, this->get_communicator(), &requests[idx]);
+          AssertThrowMPI(ierr);
         }
 
       Assert(destinations.size()==needs_to_get_cells.size(), ExcInternalError());
@@ -3218,13 +3225,16 @@ namespace parallel
         {
           MPI_Status status;
           int len;
-          MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status);
-          MPI_Get_count(&status, MPI_BYTE, &len);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status);
+          AssertThrowMPI(ierr);
+          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+          AssertThrowMPI(ierr);
           receive.resize(len);
 
           char *ptr = &receive[0];
-          MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                   this->get_communicator(), &status);
+          ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                          this->get_communicator(), &status);
+          AssertThrowMPI(ierr);
 
           cellinfo.unpack_data(receive);
           const unsigned int cells = cellinfo.tree_index.size();
@@ -3250,7 +3260,10 @@ namespace parallel
       // complete all sends, so that we can
       // safely destroy the buffers.
       if (requests.size() > 0)
-        MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+        {
+          const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
 
       //check all msgs got sent and received
       Assert(Utilities::MPI::sum(needs_to_get_cells.size(), this->get_communicator())
index af05030c335a8d0d338e8505af2ae449060137bd..46e4f94af7d34e6dba7270cf0204ae0669be0449 100644 (file)
@@ -197,13 +197,14 @@ namespace parallel
 
     unsigned int send_value
       = number_cache.n_locally_owned_active_cells[my_subdomain];
-    MPI_Allgather (&send_value,
-                   1,
-                   MPI_UNSIGNED,
-                   &number_cache.n_locally_owned_active_cells[0],
-                   1,
-                   MPI_UNSIGNED,
-                   this->mpi_communicator);
+    const int ierr = MPI_Allgather (&send_value,
+                                    1,
+                                    MPI_UNSIGNED,
+                                    &number_cache.n_locally_owned_active_cells[0],
+                                    1,
+                                    MPI_UNSIGNED,
+                                    this->mpi_communicator);
+    AssertThrowMPI(ierr);
 
     number_cache.n_global_active_cells
       = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
index a2004fd2f03e95606dc9f639dc96984d34bb3e81..c9ad02a385bbd1560c524040f3a03d1124e5f8ba 100644 (file)
@@ -1160,9 +1160,10 @@ namespace internal
               types::global_dof_index shift = 0;
               //set rcounts based on new_numbers:
               int cur_count = new_numbers_copy.size ();
-              MPI_Allgather (&cur_count,  1, MPI_INT,
-                             &rcounts[0], 1, MPI_INT,
-                             tr->get_communicator ());
+              int ierr = MPI_Allgather (&cur_count,  1, MPI_INT,
+                                        &rcounts[0], 1, MPI_INT,
+                                        tr->get_communicator ());
+              AssertThrowMPI(ierr);
 
               for (unsigned int i = 0; i < n_cpu; i++)
                 {
@@ -1172,12 +1173,13 @@ namespace internal
               Assert(((int)new_numbers_copy.size()) ==
                      rcounts[Utilities::MPI::this_mpi_process (tr->get_communicator ())],
                      ExcInternalError());
-              MPI_Allgatherv (&new_numbers_copy[0],     new_numbers_copy.size (),
-                              DEAL_II_DOF_INDEX_MPI_TYPE,
-                              &gathered_new_numbers[0], &rcounts[0],
-                              &displs[0],
-                              DEAL_II_DOF_INDEX_MPI_TYPE,
-                              tr->get_communicator ());
+              ierr = MPI_Allgatherv (&new_numbers_copy[0],     new_numbers_copy.size (),
+                                     DEAL_II_DOF_INDEX_MPI_TYPE,
+                                     &gathered_new_numbers[0], &rcounts[0],
+                                     &displs[0],
+                                     DEAL_II_DOF_INDEX_MPI_TYPE,
+                                     tr->get_communicator ());
+              AssertThrowMPI(ierr);
             }
 
             // put new numbers according to the current locally_owned_dofs_per_processor IndexSets
@@ -1641,9 +1643,10 @@ namespace internal
               // that the packet has been
               // received
               it->second.pack_data (sendbuffers[idx]);
-              MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
-                        MPI_BYTE, it->first,
-                        1100101, tria.get_communicator(), &requests[idx]);
+              const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
+                                         MPI_BYTE, it->first,
+                                         1100101, tria.get_communicator(), &requests[idx]);
+              AssertThrowMPI(ierr);
             }
 
           //* receive requests and reply
@@ -1657,13 +1660,16 @@ namespace internal
 
               MPI_Status status;
               int len;
-              MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status);
-              MPI_Get_count(&status, MPI_BYTE, &len);
+              int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status);
+              AssertThrowMPI(ierr);
+              ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+              AssertThrowMPI(ierr);
               receive.resize(len);
 
               char *ptr = &receive[0];
-              MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                       tria.get_communicator(), &status);
+              ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                              tria.get_communicator(), &status);
+              AssertThrowMPI(ierr);
 
               cellinfo.unpack_data(receive);
 
@@ -1688,9 +1694,10 @@ namespace internal
 
               //send reply
               cellinfo.pack_data(reply_buffers[idx]);
-              MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(),
-                        MPI_BYTE, status.MPI_SOURCE,
-                        1100102, tria.get_communicator(), &reply_requests[idx]);
+              ierr = MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(),
+                               MPI_BYTE, status.MPI_SOURCE,
+                               1100102, tria.get_communicator(), &reply_requests[idx]);
+              AssertThrowMPI(ierr);
             }
 
           // * finally receive the replies
@@ -1701,13 +1708,16 @@ namespace internal
 
               MPI_Status status;
               int len;
-              MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status);
-              MPI_Get_count(&status, MPI_BYTE, &len);
+              int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status);
+              AssertThrowMPI(ierr);
+              ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+              AssertThrowMPI(ierr);
               receive.resize(len);
 
               char *ptr = &receive[0];
-              MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                       tria.get_communicator(), &status);
+              ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                              tria.get_communicator(), &status);
+              AssertThrowMPI(ierr);
 
               cellinfo.unpack_data(receive);
               if (cellinfo.tree_index.size()==0)
@@ -1739,9 +1749,15 @@ namespace internal
           // complete all sends, so that we can
           // safely destroy the buffers.
           if (requests.size() > 0)
-            MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+            {
+              const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
           if (reply_requests.size() > 0)
-            MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE);
+            {
+              const int ierr = MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
 
         }
 
@@ -1909,9 +1925,10 @@ namespace internal
               // that the packet has been
               // received
               it->second.pack_data (*buffer);
-              MPI_Isend(&(*buffer)[0], buffer->size(),
-                        MPI_BYTE, it->first,
-                        123, tr->get_communicator(), &requests[idx]);
+              const int ierr = MPI_Isend(&(*buffer)[0], buffer->size(),
+                                         MPI_BYTE, it->first,
+                                         123, tr->get_communicator(), &requests[idx]);
+              AssertThrowMPI(ierr);
             }
 
 
@@ -1955,13 +1972,16 @@ namespace internal
             {
               MPI_Status status;
               int len;
-              MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status);
-              MPI_Get_count(&status, MPI_BYTE, &len);
+              int ierr = MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status);
+              AssertThrowMPI(ierr);
+              ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+              AssertThrowMPI(ierr);
               receive.resize(len);
 
               char *ptr = &receive[0];
-              MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                       tr->get_communicator(), &status);
+              ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                              tr->get_communicator(), &status);
+              AssertThrowMPI(ierr);
 
               typename types<dim>::cellinfo cellinfo;
               cellinfo.unpack_data(receive);
@@ -1994,7 +2014,10 @@ namespace internal
           // complete all sends, so that we can
           // safely destroy the buffers.
           if (requests.size() > 0)
-            MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+            {
+              const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
 
 
 #ifdef DEBUG
@@ -2005,8 +2028,10 @@ namespace internal
             unsigned int sent=needs_to_get_cells.size();
             unsigned int recv=senders.size();
 
-            MPI_Allreduce(&sent, &sum_send, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
-            MPI_Allreduce(&recv, &sum_recv, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
+            int ierr = MPI_Allreduce(&sent, &sum_send, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
+            AssertThrowMPI(ierr);
+            ierr = MPI_Allreduce(&recv, &sum_recv, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
+            AssertThrowMPI(ierr);
             Assert(sum_send==sum_recv, ExcInternalError());
           }
 #endif
@@ -2037,7 +2062,8 @@ namespace internal
           // processors from which we expect
           // messages, and by using different
           // tags for phase 1 and 2
-          MPI_Barrier(tr->get_communicator());
+          const int ierr = MPI_Barrier(tr->get_communicator());
+          AssertThrowMPI(ierr);
 #endif
         }
 
@@ -2132,11 +2158,12 @@ namespace internal
         //shift ids to make them unique
         number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus);
 
-        MPI_Allgather ( &number_cache.n_locally_owned_dofs,
-                        1, DEAL_II_DOF_INDEX_MPI_TYPE,
-                        &number_cache.n_locally_owned_dofs_per_processor[0],
-                        1, DEAL_II_DOF_INDEX_MPI_TYPE,
-                        tr->get_communicator());
+        const int ierr = MPI_Allgather ( &number_cache.n_locally_owned_dofs,
+                                         1, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         &number_cache.n_locally_owned_dofs_per_processor[0],
+                                         1, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         tr->get_communicator());
+        AssertThrowMPI(ierr);
 
         const dealii::types::global_dof_index
         shift = std::accumulate (number_cache
@@ -2365,11 +2392,12 @@ namespace internal
             //shift ids to make them unique
             number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus);
 
-            MPI_Allgather ( &number_cache.n_locally_owned_dofs,
-                            1, DEAL_II_DOF_INDEX_MPI_TYPE,
-                            &number_cache.n_locally_owned_dofs_per_processor[0],
-                            1, DEAL_II_DOF_INDEX_MPI_TYPE,
-                            tr->get_communicator());
+            int ierr = MPI_Allgather ( &number_cache.n_locally_owned_dofs,
+                                       1, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                       &number_cache.n_locally_owned_dofs_per_processor[0],
+                                       1, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                       tr->get_communicator());
+            AssertThrowMPI(ierr);
 
             const dealii::types::global_dof_index
             shift = std::accumulate (number_cache
@@ -2466,7 +2494,8 @@ namespace internal
 
           // This barrier is crucial so that messages between phase 1&2 don't
           // mix.
-          MPI_Barrier(tr->get_communicator());
+          const int ierr = MPI_Barrier(tr->get_communicator());
+          AssertThrowMPI(ierr);
 
           // Phase 2, only request the cells that were not completed in Phase
           // 1.
@@ -2724,9 +2753,10 @@ namespace internal
           my_data.resize(max_size);
 
           std::vector<char> buffer(max_size*n_cpus);
-          MPI_Allgather(&my_data[0], max_size, MPI_BYTE,
-                        &buffer[0], max_size, MPI_BYTE,
-                        tr->get_communicator());
+          const int ierr = MPI_Allgather(&my_data[0], max_size, MPI_BYTE,
+                                         &buffer[0], max_size, MPI_BYTE,
+                                         tr->get_communicator());
+          AssertThrowMPI(ierr);
 
           number_cache.locally_owned_dofs_per_processor.resize (n_cpus);
           number_cache.n_locally_owned_dofs_per_processor.resize (n_cpus);
index cd1af2a3a827778891c2291fdbd2e4d2b66cdd72..2076f9a8bb07487a7e3c3497d98fcaeca12e9283 100644 (file)
@@ -770,11 +770,12 @@ namespace DoFRenumbering
         all_dof_counts(fe_collection.n_components() *
                        Utilities::MPI::n_mpi_processes (tria->get_communicator()));
 
-        MPI_Allgather ( &local_dof_count[0],
-                        n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
-                        &all_dof_counts[0],
-                        n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
-                        tria->get_communicator());
+        const int ierr = MPI_Allgather ( &local_dof_count[0],
+                                         n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         &all_dof_counts[0],
+                                         n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         tria->get_communicator());
+        AssertThrowMPI(ierr);
 
         for (unsigned int i=0; i<n_buckets; ++i)
           Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
@@ -1057,11 +1058,12 @@ namespace DoFRenumbering
         all_dof_counts(fe_collection.n_components() *
                        Utilities::MPI::n_mpi_processes (tria->get_communicator()));
 
-        MPI_Allgather ( &local_dof_count[0],
-                        n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
-                        &all_dof_counts[0],
-                        n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
-                        tria->get_communicator());
+        const int ierr = MPI_Allgather ( &local_dof_count[0],
+                                         n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         &all_dof_counts[0],
+                                         n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         tria->get_communicator());
+        AssertThrowMPI(ierr);
 
         for (unsigned int i=0; i<n_buckets; ++i)
           Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
index 90e1a0b00ac051b0004f4431deb7be61c454cbe7..3475ddf8da36e58a62ae3e7c12c3f20c47054a7d 100644 (file)
@@ -1704,9 +1704,10 @@ namespace DoFTools
       {
         std::vector<types::global_dof_index> local_dof_count = dofs_per_component;
 
-        MPI_Allreduce ( &local_dof_count[0], &dofs_per_component[0], n_target_components,
-                        DEAL_II_DOF_INDEX_MPI_TYPE,
-                        MPI_SUM, tria->get_communicator());
+        const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_component[0], n_target_components,
+                                        DEAL_II_DOF_INDEX_MPI_TYPE,
+                                        MPI_SUM, tria->get_communicator());
+        AssertThrowMPI (ierr);
       }
 #endif
   }
@@ -1781,10 +1782,11 @@ namespace DoFTools
                (&dof_handler.get_triangulation())))
           {
             std::vector<types::global_dof_index> local_dof_count = dofs_per_block;
-            MPI_Allreduce ( &local_dof_count[0], &dofs_per_block[0],
-                            n_target_blocks,
-                            DEAL_II_DOF_INDEX_MPI_TYPE,
-                            MPI_SUM, tria->get_communicator());
+            const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_block[0],
+                                            n_target_blocks,
+                                            DEAL_II_DOF_INDEX_MPI_TYPE,
+                                            MPI_SUM, tria->get_communicator());
+            AssertThrowMPI (ierr);
           }
 #endif
       }
index 2e4a1163d551df5cd73d5cc09703ed3e9404d5a5..f8664dd84e364180e2a9b522d08ceacdeb870961 100755 (executable)
@@ -1086,12 +1086,13 @@ namespace FETools
           destinations.push_back (it->receiver);
 
           it->pack_data (*buffer);
-          MPI_Isend (&(*buffer)[0], buffer->size(),
-                     MPI_BYTE,
-                     it->receiver,
-                     round,
-                     communicator,
-                     &requests[idx]);
+          const int ierr = MPI_Isend (&(*buffer)[0], buffer->size(),
+                                      MPI_BYTE,
+                                      it->receiver,
+                                      round,
+                                      communicator,
+                                      &requests[idx]);
+          AssertThrowMPI(ierr);
         }
 
       Assert(destinations.size()==cells_to_send.size(), ExcInternalError());
@@ -1106,12 +1107,15 @@ namespace FETools
         {
           MPI_Status status;
           int len;
-          MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status);
-          MPI_Get_count(&status, MPI_BYTE, &len);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status);
+          AssertThrowMPI(ierr);
+          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+          AssertThrowMPI(ierr);
           receive.resize (len);
 
           char *buf = &receive[0];
-          MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status);
+          ierr = MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status);
+          AssertThrowMPI(ierr);
 
           cell_data.unpack_data (receive);
 
@@ -1124,7 +1128,10 @@ namespace FETools
         }
 
       if (requests.size () > 0)
-        MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+        {
+          const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
 
       // finally sort the list of cells
       std::sort (received_cells.begin (), received_cells.end ());
index d75ec11de151505f3d2dead98af8e13108c795aa..5d667876a693423ca28a852a4281b962d9244771 100644 (file)
@@ -1800,8 +1800,9 @@ next_cell:
     // processors and shifting the indices accordingly
     const unsigned int n_cpu = Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
     std::vector<types::global_vertex_index> indices(n_cpu);
-    MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0],
-                  indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator());
+    int ierr = MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0],
+                             indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator());
+    AssertThrowMPI(ierr);
     const types::global_vertex_index shift = std::accumulate(&indices[0],
                                                              &indices[0]+triangulation.locally_owned_subdomain(),0);
 
@@ -1841,8 +1842,9 @@ next_cell:
           }
 
         // Send the message
-        MPI_Isend(&vertices_send_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
-                  destination, 0, triangulation.get_communicator(), &first_requests[i]);
+        ierr = MPI_Isend(&vertices_send_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
+                         destination, 0, triangulation.get_communicator(), &first_requests[i]);
+        AssertThrowMPI(ierr);
       }
 
     // Receive the first message
@@ -1859,8 +1861,9 @@ next_cell:
         vertices_recv_buffers[i].resize(buffer_size);
 
         // Receive the message
-        MPI_Recv(&vertices_recv_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
-                 source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+        ierr = MPI_Recv(&vertices_recv_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
+                        source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
       }
 
 
@@ -1893,8 +1896,9 @@ next_cell:
           }
 
         // Send the message
-        MPI_Isend(&cellids_send_buffers[i][0], buffer_size, MPI_CHAR,
-                  destination, 0, triangulation.get_communicator(), &second_requests[i]);
+        ierr = MPI_Isend(&cellids_send_buffers[i][0], buffer_size, MPI_CHAR,
+                         destination, 0, triangulation.get_communicator(), &second_requests[i]);
+        AssertThrowMPI(ierr);
       }
 
     // Receive the second message
@@ -1908,8 +1912,9 @@ next_cell:
         cellids_recv_buffers[i].resize(buffer_size);
 
         // Receive the message
-        MPI_Recv(&cellids_recv_buffers[i][0],buffer_size, MPI_CHAR,
-                 source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+        ierr = MPI_Recv(&cellids_recv_buffers[i][0],buffer_size, MPI_CHAR,
+                        source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
       }
 
 
index 350cd548421cde5e4adf61e9b6eab10c5ae95533..76f59ade320518a46eae560dd44aa1a82a41cd4c 100644 (file)
@@ -207,6 +207,8 @@ namespace PETScWrappers
   void
   MatrixBase::compress (const VectorOperation::values operation)
   {
+    int ierr;
+    (void)ierr;
 #ifdef DEBUG
 #ifdef DEAL_II_WITH_MPI
     // Check that all processors agree that last_action is the same (or none!)
@@ -214,8 +216,9 @@ namespace PETScWrappers
     int my_int_last_action = last_action;
     int all_int_last_action;
 
-    MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
-                  MPI_BOR, get_mpi_communicator());
+    ierr = MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
+                         MPI_BOR, get_mpi_communicator());
+    AssertThrowMPI(ierr);
 
     AssertThrow(all_int_last_action != (VectorOperation::add | VectorOperation::insert),
                 ExcMessage("Error: not all processors agree on the last VectorOperation before this compress() call."));
@@ -227,7 +230,6 @@ namespace PETScWrappers
                 ExcMessage("Missing compress() or calling with wrong VectorOperation argument."));
 
     // flush buffers
-    int ierr;
     ierr = MatAssemblyBegin (matrix,MAT_FINAL_ASSEMBLY);
     AssertThrow (ierr == 0, ExcPETScError(ierr));
 
index c1364370f39fe7752935e137c9d6636bf0595380..50bc5f57f6c5b874b1468a48267781e61c5d019d 100644 (file)
@@ -121,8 +121,9 @@ namespace PETScWrappers
       // mismatch (may not be true for every proc)
 
       int k_global, k = ((size() != n) || (local_size() != local_sz));
-      MPI_Allreduce (&k, &k_global, 1,
-                     MPI_INT, MPI_LOR, communicator);
+      int ierr = MPI_Allreduce (&k, &k_global, 1,
+                                MPI_INT, MPI_LOR, communicator);
+      AssertThrowMPI(ierr);
 
       if (k_global || has_ghost_elements())
         {
@@ -134,7 +135,6 @@ namespace PETScWrappers
 //         AssertThrow (ierr == 0, ExcPETScError(ierr));
 
           // so let's go the slow way:
-          int ierr;
 
 #if DEAL_II_PETSC_VERSION_LT(3,2,0)
           ierr = VecDestroy (vector);
@@ -413,7 +413,8 @@ namespace PETScWrappers
             i++)
         {
           // This is slow, but most likely only used to debug.
-          MPI_Barrier(communicator);
+          ierr = MPI_Barrier(communicator);
+          AssertThrowMPI(ierr);
           if (i == Utilities::MPI::this_mpi_process(communicator))
             {
               if (across)
index febffa9c45d8a1af6291b4f2c4df9023cb7a9a9f..4d36f0e414c55c1e8462193f18825fd9ff6b38dd 100644 (file)
@@ -405,6 +405,8 @@ namespace PETScWrappers
   void
   VectorBase::compress (const VectorOperation::values operation)
   {
+    int ierr;
+    (void)ierr;
 #ifdef DEBUG
 #ifdef DEAL_II_WITH_MPI
     // Check that all processors agree that last_action is the same (or none!)
@@ -412,8 +414,9 @@ namespace PETScWrappers
     int my_int_last_action = last_action;
     int all_int_last_action;
 
-    MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
-                  MPI_BOR, get_mpi_communicator());
+    ierr = MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
+                         MPI_BOR, get_mpi_communicator());
+    AssertThrowMPI(ierr);
 
     AssertThrow(all_int_last_action != (::dealii::VectorOperation::add | ::dealii::VectorOperation::insert),
                 ExcMessage("Error: not all processors agree on the last VectorOperation before this compress() call."));
@@ -438,7 +441,6 @@ namespace PETScWrappers
     // we still need to call
     // VecAssemblyBegin/End on all
     // processors.
-    int ierr;
     ierr = VecAssemblyBegin(vector);
     AssertThrow (ierr == 0, ExcPETScError(ierr));
     ierr = VecAssemblyEnd(vector);
index 850e3501d17522a904d791340f1a76f982b006d9..f57b2cfd2e40e040b5c56408d3877fa788345845 100644 (file)
@@ -595,13 +595,16 @@ namespace SparsityTools
     {
       unsigned int idx=0;
       for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
-        MPI_Isend(&(it->second[0]),
-                  it->second.size(),
-                  DEAL_II_DOF_INDEX_MPI_TYPE,
-                  it->first,
-                  124,
-                  mpi_comm,
-                  &requests[idx]);
+        {
+          const int ierr = MPI_Isend(&(it->second[0]),
+                                     it->second.size(),
+                                     DEAL_II_DOF_INDEX_MPI_TYPE,
+                                     it->first,
+                                     124,
+                                     mpi_comm,
+                                     &requests[idx]);
+          AssertThrowMPI(ierr);
+        }
     }
 
     {
@@ -611,13 +614,16 @@ namespace SparsityTools
         {
           MPI_Status status;
           int len;
-          MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+          AssertThrowMPI(ierr);
           Assert (status.MPI_TAG==124, ExcInternalError());
 
-          MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+          ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+          AssertThrowMPI(ierr);
           recv_buf.resize(len);
-          MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
-                   status.MPI_TAG, mpi_comm, &status);
+          ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
+                          status.MPI_TAG, mpi_comm, &status);
+          AssertThrowMPI(ierr);
 
           std::vector<DynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
           std::vector<DynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
@@ -639,7 +645,10 @@ namespace SparsityTools
 
     // complete all sends, so that we can safely destroy the buffers.
     if (requests.size())
-      MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+      {
+        const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+      }
 
   }
 
@@ -717,13 +726,16 @@ namespace SparsityTools
     {
       unsigned int idx=0;
       for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
-        MPI_Isend(&(it->second[0]),
-                  it->second.size(),
-                  DEAL_II_DOF_INDEX_MPI_TYPE,
-                  it->first,
-                  124,
-                  mpi_comm,
-                  &requests[idx]);
+        {
+          const int ierr = MPI_Isend(&(it->second[0]),
+                                     it->second.size(),
+                                     DEAL_II_DOF_INDEX_MPI_TYPE,
+                                     it->first,
+                                     124,
+                                     mpi_comm,
+                                     &requests[idx]);
+          AssertThrowMPI(ierr);
+        }
     }
 
     {
@@ -733,13 +745,16 @@ namespace SparsityTools
         {
           MPI_Status status;
           int len;
-          MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+          AssertThrowMPI(ierr);
           Assert (status.MPI_TAG==124, ExcInternalError());
 
-          MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+          ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+          AssertThrowMPI(ierr);
           recv_buf.resize(len);
-          MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
-                   status.MPI_TAG, mpi_comm, &status);
+          ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
+                          status.MPI_TAG, mpi_comm, &status);
+          AssertThrowMPI(ierr);
 
           std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
           std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
@@ -761,7 +776,10 @@ namespace SparsityTools
 
     // complete all sends, so that we can safely destroy the buffers.
     if (requests.size())
-      MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+      {
+        const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+      }
   }
 #endif
 }
index 1002f233ce5fadcc2023cfd410d9d577d92e14db..f5adec44bc03f51ccae0bf3a247bd955a485313d 100644 (file)
@@ -210,9 +210,18 @@ namespace
              // the receiving end will be waiting. In that case we just send
               // an empty message.
               if (data.size())
-                MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+                {
+                  const int ierr = MPI_Isend(&data[0], data.size()*sizeof(data[0]),
+                                             MPI_BYTE, dest, 71, tria->get_communicator(),
+                                             &*requests.rbegin());
+                  AssertThrowMPI(ierr);
+                }
               else
-                MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+                {
+                  const int ierr = MPI_Isend(NULL, 0, MPI_BYTE, dest, 71,
+                                             tria->get_communicator(), &*requests.rbegin());
+                  AssertThrowMPI(ierr);
+                }
             }
         }
 
@@ -224,14 +233,16 @@ namespace
             {
               MPI_Status status;
               int len;
-              MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
-              MPI_Get_count(&status, MPI_BYTE, &len);
+              int ierr = MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
+              AssertThrowMPI(ierr);
+              ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+              AssertThrowMPI(ierr);
 
               if (len==0)
                 {
-                  int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                                     tria->get_communicator(), &status);
-                  AssertThrow(err==MPI_SUCCESS, ExcInternalError());
+                  ierr = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                                  tria->get_communicator(), &status);
+                  AssertThrowMPI(ierr);
                   continue;
                 }
 
@@ -240,9 +251,9 @@ namespace
               receive_buffer.resize(count);
 
               void *ptr = &receive_buffer[0];
-              int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                                 tria->get_communicator(), &status);
-              AssertThrow(err==MPI_SUCCESS, ExcInternalError());
+              ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                              tria->get_communicator(), &status);
+              AssertThrowMPI(ierr);
 
               for (unsigned int i=0; i<receive_buffer.size(); ++i)
                 {
@@ -256,14 +267,16 @@ namespace
         // * wait for all MPI_Isend to complete
         if (requests.size() > 0)
           {
-            MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+            const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
             requests.clear();
           }
 #ifdef DEBUG
         // Make sure in debug mode, that everybody sent/received all packages
         // on this level. If a deadlock occurs here, the list of expected
         // senders is not computed correctly.
-        MPI_Barrier(tria->get_communicator());
+        const int ierr = MPI_Barrier(tria->get_communicator());
+        AssertThrowMPI(ierr);
 #endif
       }
 #endif
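
The change applies a single pattern throughout the library: the return code of every MPI call is stored in a local ierr variable and passed to AssertThrowMPI, which throws an exception with a readable message whenever the code is not MPI_SUCCESS. The snippet below is a minimal, self-contained sketch of that pattern outside of deal.II; the check_mpi() helper is a hypothetical stand-in for the AssertThrowMPI macro and is not part of the library.

    // Minimal sketch of the error-checking pattern this commit applies.
    // check_mpi() is a hypothetical stand-in for deal.II's AssertThrowMPI macro.
    #include <mpi.h>

    #include <stdexcept>
    #include <string>

    void check_mpi (const int ierr)
    {
      if (ierr != MPI_SUCCESS)
        {
          char message[MPI_MAX_ERROR_STRING];
          int  length = 0;
          MPI_Error_string (ierr, message, &length);
          throw std::runtime_error ("MPI call failed: " +
                                    std::string (message, length));
        }
    }

    int main (int argc, char **argv)
    {
      int ierr = MPI_Init (&argc, &argv);
      check_mpi (ierr);

      // Ask MPI to return error codes instead of aborting, so that the
      // checks below can actually fire.
      ierr = MPI_Comm_set_errhandler (MPI_COMM_WORLD, MPI_ERRORS_RETURN);
      check_mpi (ierr);

      int local = 1, global = 0;
      ierr = MPI_Allreduce (&local, &global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      check_mpi (ierr);

      ierr = MPI_Finalize ();
      check_mpi (ierr);
      return 0;
    }

Note that MPI's default error handler (MPI_ERRORS_ARE_FATAL) aborts the program before a return code is ever seen, so checks of this kind only report anything if the error handler has been switched to MPI_ERRORS_RETURN, as in the sketch above.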
