From 8485d4ffe96202f0ecb075a1697b664ae0a8f0d8 Mon Sep 17 00:00:00 2001 From: David Wells Date: Wed, 9 Nov 2016 19:08:40 -0500 Subject: [PATCH] Check all MPI calls. This commit adds AssertThrowMPI checks to the return code of every MPI call in deal.II. --- doc/news/changes.h | 9 +- include/deal.II/base/mpi.templates.h | 64 ++++---- .../lac/la_parallel_vector.templates.h | 140 ++++++++++-------- .../matrix_free/mapping_info.templates.h | 10 +- .../matrix_free/matrix_free.templates.h | 5 +- .../deal.II/multigrid/mg_transfer.templates.h | 21 ++- .../deal.II/numerics/vector_tools.templates.h | 7 +- source/base/data_out_base.cc | 73 +++++---- source/base/index_set.cc | 11 +- source/base/mpi.cc | 55 +++---- source/base/partitioner.cc | 51 ++++--- source/base/timer.cc | 5 +- source/base/utilities.cc | 3 +- source/distributed/grid_refinement.cc | 35 +++-- source/distributed/tria.cc | 49 +++--- source/distributed/tria_base.cc | 15 +- source/dofs/dof_handler_policy.cc | 130 +++++++++------- source/dofs/dof_renumbering.cc | 22 +-- source/dofs/dof_tools.cc | 16 +- source/fe/fe_tools_extrapolate.cc | 27 ++-- source/grid/grid_tools.cc | 25 ++-- source/lac/petsc_matrix_base.cc | 8 +- source/lac/petsc_parallel_vector.cc | 9 +- source/lac/petsc_vector_base.cc | 8 +- source/lac/sparsity_tools.cc | 66 ++++++--- source/multigrid/mg_level_global_transfer.cc | 37 +++-- 26 files changed, 539 insertions(+), 362 deletions(-) diff --git a/doc/news/changes.h b/doc/news/changes.h index b4e8ea63bc..80ea3a440f 100644 --- a/doc/news/changes.h +++ b/doc/news/changes.h @@ -215,6 +215,13 @@ inconvenience this causes.

General

    +
1. Improved: the error codes for all MPI functions are now checked and, if an + MPI function fails for any reason, an exception with a helpful message is + thrown. +
    + (David Wells, 2016/11/09) +
+
  3. Fixed: We have run the PVS static analysis checker on the entire code base, to see what possible problems it uncovers (see @@ -466,7 +473,7 @@ inconvenience this causes.
    (Rajat Arora, 2016/10/29)
- +
  5. New: Add MatrixFreeOperators::MassOperator representing a mass matrix.
    (Daniel Arndt, 2016/10/27) diff --git a/include/deal.II/base/mpi.templates.h b/include/deal.II/base/mpi.templates.h index a5806cc217..e67dc1fe87 100644 --- a/include/deal.II/base/mpi.templates.h +++ b/include/deal.II/base/mpi.templates.h @@ -95,21 +95,23 @@ namespace Utilities #ifdef DEAL_II_WITH_MPI if (job_supports_mpi()) { - MPI_Allreduce (values != output - ? - // TODO This const_cast is only needed for older - // (e.g., openMPI 1.6, released in 2012) - // implementations of MPI-2. It is not needed as of - // MPI-3 and we should remove it at some point in - // the future. - const_cast(static_cast(values)) - : - MPI_IN_PLACE, - static_cast(output), - static_cast(size), - internal::mpi_type_id(values), - mpi_op, - mpi_communicator); + const int ierr = MPI_Allreduce + (values != output + ? + // TODO This const_cast is only needed for older + // (e.g., openMPI 1.6, released in 2012) + // implementations of MPI-2. It is not needed as + // of MPI-3 and we should remove it at some + // point in the future. + const_cast(static_cast(values)) + : + MPI_IN_PLACE, + static_cast(output), + static_cast(size), + internal::mpi_type_id(values), + mpi_op, + mpi_communicator); + AssertThrowMPI(ierr); } else #endif @@ -132,21 +134,23 @@ namespace Utilities if (job_supports_mpi()) { T dummy_selector; - MPI_Allreduce (values != output - ? - // TODO This const_cast is only needed for older - // (e.g., openMPI 1.6, released in 2012) - // implementations of MPI-2. It is not needed as of - // MPI-3 and we should remove it at some point in - // the future. - const_cast(static_cast(values)) - : - MPI_IN_PLACE, - static_cast(output), - static_cast(size*2), - internal::mpi_type_id(&dummy_selector), - mpi_op, - mpi_communicator); + const int ierr = MPI_Allreduce + (values != output + ? + // TODO This const_cast is only needed for older + // (e.g., openMPI 1.6, released in 2012) + // implementations of MPI-2. It is not needed as + // of MPI-3 and we should remove it at some + // point in the future. 
+ const_cast(static_cast(values)) + : + MPI_IN_PLACE, + static_cast(output), + static_cast(size*2), + internal::mpi_type_id(&dummy_selector), + mpi_op, + mpi_communicator); + AssertThrowMPI(ierr); } else #endif diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index 1c3c48237d..da622a1f75 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -40,10 +40,16 @@ namespace LinearAlgebra { #ifdef DEAL_II_WITH_MPI for (size_type j=0; jval[current_index_start], - part.ghost_targets()[i].second*sizeof(Number), - MPI_BYTE, - part.ghost_targets()[i].first, - part.this_mpi_process() + - part.n_mpi_processes()*channel, - part.get_communicator(), - &compress_requests[n_import_targets+i]); + const int ierr = MPI_Send_init (&this->val[current_index_start], + part.ghost_targets()[i].second*sizeof(Number), + MPI_BYTE, + part.ghost_targets()[i].first, + part.this_mpi_process() + + part.n_mpi_processes()*channel, + part.get_communicator(), + &compress_requests[n_import_targets+i]); + AssertThrowMPI (ierr); current_index_start += part.ghost_targets()[i].second; } AssertDimension (current_index_start, @@ -606,9 +614,8 @@ namespace LinearAlgebra compress_requests.size()); if (compress_requests.size() > 0) { - int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]); - (void)ierr; - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]); + AssertThrowMPI(ierr); } #endif } @@ -650,10 +657,9 @@ namespace LinearAlgebra // first wait for the receive to complete if (compress_requests.size() > 0 && n_import_targets > 0) { - int ierr = MPI_Waitall (n_import_targets, &compress_requests[0], - MPI_STATUSES_IGNORE); - (void)ierr; - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Waitall (n_import_targets, &compress_requests[0], + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); Number *read_position = import_data; std::vector >::const_iterator @@ -682,11 +688,10 @@ namespace LinearAlgebra if (compress_requests.size() > 0 && n_ghost_targets > 0) { - int ierr = MPI_Waitall (n_ghost_targets, - &compress_requests[n_import_targets], - MPI_STATUSES_IGNORE); - (void)ierr; - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Waitall (n_ghost_targets, + &compress_requests[n_import_targets], + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); } else AssertDimension (part.n_ghost_indices(), 0); @@ -727,14 +732,15 @@ namespace LinearAlgebra { // allow writing into ghost indices even though we are in a // const function - MPI_Recv_init (const_cast(&val[current_index_start]), - part.ghost_targets()[i].second*sizeof(Number), - MPI_BYTE, - part.ghost_targets()[i].first, - part.ghost_targets()[i].first + - counter*part.n_mpi_processes(), - part.get_communicator(), - &update_ghost_values_requests[i]); + const int ierr = MPI_Recv_init (const_cast(&val[current_index_start]), + part.ghost_targets()[i].second*sizeof(Number), + MPI_BYTE, + part.ghost_targets()[i].first, + part.ghost_targets()[i].first + + counter*part.n_mpi_processes(), + part.get_communicator(), + &update_ghost_values_requests[i]); + AssertThrowMPI (ierr); current_index_start += part.ghost_targets()[i].second; } AssertDimension (current_index_start, @@ -746,13 +752,14 @@ namespace LinearAlgebra current_index_start = 0; for (unsigned int i=0; i 0) { - int ierr = MPI_Startall(update_ghost_values_requests.size(), - 
&update_ghost_values_requests[0]); - (void)ierr; - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Startall(update_ghost_values_requests.size(), + &update_ghost_values_requests[0]); + AssertThrowMPI(ierr); } #else (void)counter; @@ -801,11 +807,10 @@ namespace LinearAlgebra // make this function thread safe Threads::Mutex::ScopedLock lock (mutex); - int ierr = MPI_Waitall (update_ghost_values_requests.size(), - &update_ghost_values_requests[0], - MPI_STATUSES_IGNORE); - (void)ierr; - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Waitall (update_ghost_values_requests.size(), + &update_ghost_values_requests[0], + MPI_STATUSES_IGNORE); + AssertThrowMPI (ierr); } #endif vector_is_ghosted = true; @@ -869,19 +874,19 @@ namespace LinearAlgebra int flag = 1; if (update_ghost_values_requests.size()>0) { - int ierr = MPI_Testall (update_ghost_values_requests.size(), - &update_ghost_values_requests[0], - &flag, MPI_STATUSES_IGNORE); - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Testall (update_ghost_values_requests.size(), + &update_ghost_values_requests[0], + &flag, MPI_STATUSES_IGNORE); + AssertThrowMPI (ierr); Assert (flag == 1, ExcMessage("MPI found unfinished update_ghost_values() requests " "when calling swap, which is not allowed")); } if (compress_requests.size()>0) { - int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0], - &flag, MPI_STATUSES_IGNORE); - Assert (ierr == MPI_SUCCESS, ExcInternalError()); + const int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0], + &flag, MPI_STATUSES_IGNORE); + AssertThrowMPI (ierr); Assert (flag == 1, ExcMessage("MPI found unfinished compress() requests " "when calling swap, which is not allowed")); @@ -1533,7 +1538,10 @@ namespace LinearAlgebra #ifdef DEAL_II_WITH_MPI if (partitioner->n_mpi_processes() > 1) for (unsigned int i=0; i<partitioner->this_mpi_process(); i++) - MPI_Barrier (partitioner->get_communicator()); + { + const int ierr = MPI_Barrier (partitioner->get_communicator()); + AssertThrowMPI (ierr); + } #endif out << "Process #" << partitioner->this_mpi_process() << std::endl @@ -1568,11 +1576,15 @@ namespace LinearAlgebra #ifdef DEAL_II_WITH_MPI if (partitioner->n_mpi_processes() > 1) { - MPI_Barrier (partitioner->get_communicator()); + int ierr = MPI_Barrier (partitioner->get_communicator()); + AssertThrowMPI (ierr); for (unsigned int i=partitioner->this_mpi_process()+1; i<partitioner->n_mpi_processes(); i++) - MPI_Barrier (partitioner->get_communicator()); + { + ierr = MPI_Barrier (partitioner->get_communicator()); + AssertThrowMPI (ierr); + } } #endif diff --git a/include/deal.II/matrix_free/mapping_info.templates.h b/include/deal.II/matrix_free/mapping_info.templates.h index 7923a53a6f..82ce6b40a4 100644 --- a/include/deal.II/matrix_free/mapping_info.templates.h +++ b/include/deal.II/matrix_free/mapping_info.templates.h @@ -866,8 +866,9 @@ namespace internal // disable the check here only if no processor has any such data #ifdef DEAL_II_WITH_MPI unsigned int general_size_glob = 0, general_size_loc = jacobians.size(); - MPI_Allreduce (&general_size_loc, &general_size_glob, 1, MPI_UNSIGNED, - MPI_MAX, size_info.communicator); + int ierr = MPI_Allreduce (&general_size_loc, &general_size_glob, 1, + MPI_UNSIGNED, MPI_MAX, size_info.communicator); + AssertThrowMPI (ierr); #else unsigned int general_size_glob = jacobians.size(); #endif @@ -885,8 +886,9 @@ namespace internal #ifdef DEAL_II_WITH_MPI unsigned int quad_size_glob = 0, quad_size_loc =
quadrature_points.size(); - MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED, - MPI_MAX, size_info.communicator); + ierr = MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED, + MPI_MAX, size_info.communicator); + AssertThrowMPI (ierr); #else unsigned int quad_size_glob = quadrature_points.size(); #endif diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h index 72de138444..c6dfab2e8a 100644 --- a/include/deal.II/matrix_free/matrix_free.templates.h +++ b/include/deal.II/matrix_free/matrix_free.templates.h @@ -85,8 +85,9 @@ namespace internal if (Utilities::MPI::job_supports_mpi()) { int communicators_same = 0; - MPI_Comm_compare (dist_tria->get_communicator(), comm_mf, - &communicators_same); + const int ierr = MPI_Comm_compare (dist_tria->get_communicator(), comm_mf, + &communicators_same); + AssertThrowMPI (ierr); Assert (communicators_same == MPI_IDENT || communicators_same == MPI_CONGRUENT, ExcMessage ("MPI communicator in parallel::distributed::Triangulation " diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h index 143cbbefc6..8b525c4e90 100644 --- a/include/deal.II/multigrid/mg_transfer.templates.h +++ b/include/deal.II/multigrid/mg_transfer.templates.h @@ -203,7 +203,8 @@ MGLevelGlobalTransfer::copy_to_mg reinit_vector(mg_dof_handler, component_to_block_map, dst); #ifdef DEBUG_OUTPUT std::cout << "copy_to_mg src " << src.l2_norm() << std::endl; - MPI_Barrier(MPI_COMM_WORLD); + int ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); #endif if (perform_plain_copy) @@ -220,7 +221,8 @@ MGLevelGlobalTransfer::copy_to_mg { --level; #ifdef DEBUG_OUTPUT - MPI_Barrier(MPI_COMM_WORLD); + ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); #endif typedef std::vector >::const_iterator dof_pair_iterator; @@ -240,7 +242,8 @@ MGLevelGlobalTransfer::copy_to_mg dst_level.compress(VectorOperation::insert); #ifdef DEBUG_OUTPUT - MPI_Barrier(MPI_COMM_WORLD); + ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); std::cout << "copy_to_mg dst " << level << " " << dst_level.l2_norm() << std::endl; #endif } @@ -273,9 +276,11 @@ MGLevelGlobalTransfer::copy_from_mg for (unsigned int level=src.min_level(); level<=src.max_level(); ++level) { #ifdef DEBUG_OUTPUT - MPI_Barrier(MPI_COMM_WORLD); + int ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); std::cout << "copy_from_mg src " << level << " " << src[level].l2_norm() << std::endl; - MPI_Barrier(MPI_COMM_WORLD); + ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); #endif typedef std::vector >::const_iterator dof_pair_iterator; @@ -295,14 +300,16 @@ MGLevelGlobalTransfer::copy_from_mg #ifdef DEBUG_OUTPUT { dst.compress(VectorOperation::insert); - MPI_Barrier(MPI_COMM_WORLD); + ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); std::cout << "copy_from_mg level=" << level << " " << dst.l2_norm() << std::endl; } #endif } dst.compress(VectorOperation::insert); #ifdef DEBUG_OUTPUT - MPI_Barrier(MPI_COMM_WORLD); + const int ierr = MPI_Barrier(MPI_COMM_WORLD); + AssertThrowMPI(ierr); std::cout << "copy_from_mg " << dst.l2_norm() << std::endl; #endif } diff --git a/include/deal.II/numerics/vector_tools.templates.h b/include/deal.II/numerics/vector_tools.templates.h index e277a25b89..6f04aa2ace 100644 --- a/include/deal.II/numerics/vector_tools.templates.h +++ b/include/deal.II/numerics/vector_tools.templates.h @@ -7607,9 +7607,10 @@ namespace VectorTools double 
my_values[3] = { mean_double.real(), mean_double.imag(), area }; double global_values[3]; - MPI_Allreduce (my_values, global_values, 3, MPI_DOUBLE, - MPI_SUM, - p_triangulation->get_communicator()); + const int ierr = MPI_Allreduce (my_values, global_values, 3, MPI_DOUBLE, + MPI_SUM, + p_triangulation->get_communicator()); + AssertThrowMPI (ierr); set_possibly_complex_number(global_values[0], global_values[1], mean); diff --git a/source/base/data_out_base.cc b/source/base/data_out_base.cc index ad6c48e0d5..f137ec11c7 100644 --- a/source/base/data_out_base.cc +++ b/source/base/data_out_base.cc @@ -6156,28 +6156,28 @@ void DataOutInterface::write_vtu_in_parallel (const char *filename write_vtu (f); #else - int myrank, nproc, err; - MPI_Comm_rank(comm, &myrank); - MPI_Comm_size(comm, &nproc); + int myrank, nproc; + int ierr = MPI_Comm_rank(comm, &myrank); + AssertThrowMPI(ierr); + ierr = MPI_Comm_size(comm, &nproc); + AssertThrowMPI(ierr); MPI_Info info; - MPI_Info_create(&info); + ierr = MPI_Info_create(&info); + AssertThrowMPI(ierr); MPI_File fh; - err = MPI_File_open(comm, const_cast(filename), - MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh); - AssertThrow(err==0, - ExcMessage("Unable to open file <" - + std::string(filename) + - "> with MPI_File_open. The error code " - "returned was " - + Utilities::to_string(err) + ".")); + ierr = MPI_File_open(comm, const_cast(filename), + MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh); + AssertThrowMPI(ierr); - - MPI_File_set_size(fh, 0); // delete the file contents + ierr = MPI_File_set_size(fh, 0); // delete the file contents + AssertThrowMPI(ierr); // this barrier is necessary, because otherwise others might already // write while one core is still setting the size to zero. - MPI_Barrier(comm); - MPI_Info_free(&info); + ierr = MPI_Barrier(comm); + AssertThrowMPI(ierr); + ierr = MPI_Info_free(&info); + AssertThrowMPI(ierr); unsigned int header_size; @@ -6187,18 +6187,24 @@ void DataOutInterface::write_vtu_in_parallel (const char *filename std::stringstream ss; DataOutBase::write_vtu_header(ss, vtk_flags); header_size = ss.str().size(); - MPI_File_write(fh, const_cast(ss.str().c_str()), header_size, MPI_CHAR, MPI_STATUS_IGNORE); + ierr = MPI_File_write(fh, const_cast(ss.str().c_str()), header_size, + MPI_CHAR, MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); } - MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm); + ierr = MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm); + AssertThrowMPI(ierr); - MPI_File_seek_shared( fh, header_size, MPI_SEEK_SET ); + ierr = MPI_File_seek_shared( fh, header_size, MPI_SEEK_SET ); + AssertThrowMPI(ierr); { std::stringstream ss; DataOutBase::write_vtu_main (get_patches(), get_dataset_names(), get_vector_data_ranges(), vtk_flags, ss); - MPI_File_write_ordered(fh, const_cast(ss.str().c_str()), ss.str().size(), MPI_CHAR, MPI_STATUS_IGNORE); + ierr = MPI_File_write_ordered(fh, const_cast(ss.str().c_str()), + ss.str().size(), MPI_CHAR, MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); } //write footer @@ -6207,9 +6213,12 @@ void DataOutInterface::write_vtu_in_parallel (const char *filename std::stringstream ss; DataOutBase::write_vtu_footer(ss); unsigned int footer_size = ss.str().size(); - MPI_File_write_shared(fh, const_cast(ss.str().c_str()), footer_size, MPI_CHAR, MPI_STATUS_IGNORE); + ierr = MPI_File_write_shared(fh, const_cast(ss.str().c_str()), + footer_size, MPI_CHAR, MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); } - MPI_File_close( &fh ); + ierr = MPI_File_close( &fh ); + AssertThrowMPI(ierr); #endif } @@ -6469,8 +6478,10 @@ 
create_xdmf_entry (const DataOutBase::DataOutFilter &data_filter, // And compute the global total #ifdef DEAL_II_WITH_MPI - MPI_Comm_rank(comm, &myrank); - MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm); + int ierr = MPI_Comm_rank(comm, &myrank); + AssertThrowMPI(ierr); + ierr = MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm); + AssertThrowMPI(ierr); #else myrank = 0; global_node_cell_count[0] = local_node_cell_count[0]; @@ -6507,7 +6518,8 @@ write_xdmf_file (const std::vector &entries, int myrank; #ifdef DEAL_II_WITH_MPI - MPI_Comm_rank(comm, &myrank); + const int ierr = MPI_Comm_rank(comm, &myrank); + AssertThrowMPI(ierr); #else (void)comm; myrank = 0; @@ -6737,6 +6749,8 @@ void DataOutBase::write_hdf5_parallel (const std::vector > & const std::string &solution_filename, MPI_Comm comm) { + int ierr; + (void)ierr; #ifndef DEAL_II_WITH_HDF5 // throw an exception, but first make // sure the compiler does not warn about @@ -6778,7 +6792,8 @@ void DataOutBase::write_hdf5_parallel (const std::vector > & #ifndef H5_HAVE_PARALLEL # ifdef DEAL_II_WITH_MPI int world_size; - MPI_Comm_size(comm, &world_size); + ierr = MPI_Comm_size(comm, &world_size); + AssertThrowMPI(ierr); AssertThrow (world_size <= 1, ExcMessage ("Serial HDF5 output on multiple processes is not yet supported.")); # endif @@ -6802,8 +6817,10 @@ void DataOutBase::write_hdf5_parallel (const std::vector > & // Compute the global total number of nodes/cells // And determine the offset of the data for this process #ifdef DEAL_II_WITH_MPI - MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm); - MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm); + ierr = MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm); + AssertThrowMPI(ierr); + ierr = MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm); + AssertThrowMPI(ierr); global_node_cell_offsets[0] -= local_node_cell_count[0]; global_node_cell_offsets[1] -= local_node_cell_count[1]; #else diff --git a/source/base/index_set.cc b/source/base/index_set.cc index 6126766db5..778384dc50 100644 --- a/source/base/index_set.cc +++ b/source/base/index_set.cc @@ -578,9 +578,11 @@ IndexSet::is_ascending_and_one_to_one (const MPI_Comm &communicator) const const unsigned int gather_size = (my_rank==0)?n_ranks:1; std::vector global_dofs(gather_size); - MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE, - &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0, - communicator); + int ierr = MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE, + &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0, + communicator); + AssertThrowMPI(ierr); + if (my_rank == 0) { // find out if the received std::vector is ascending @@ -604,7 +606,8 @@ IndexSet::is_ascending_and_one_to_one (const MPI_Comm &communicator) const // now broadcast the result int is_ascending = is_globally_ascending ? 
1 : 0; - MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator); + ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator); + AssertThrowMPI(ierr); return (is_ascending==1); #else diff --git a/source/base/mpi.cc b/source/base/mpi.cc index 2e3d95a7fd..3077c4d980 100644 --- a/source/base/mpi.cc +++ b/source/base/mpi.cc @@ -81,7 +81,8 @@ namespace Utilities unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator) { int n_jobs=1; - (void) MPI_Comm_size (mpi_communicator, &n_jobs); + const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs); + AssertThrowMPI(ierr); return n_jobs; } @@ -90,7 +91,8 @@ namespace Utilities unsigned int this_mpi_process (const MPI_Comm &mpi_communicator) { int rank=0; - (void) MPI_Comm_rank (mpi_communicator, &rank); + const int ierr = MPI_Comm_rank (mpi_communicator, &rank); + AssertThrowMPI(ierr); return rank; } @@ -99,7 +101,8 @@ namespace Utilities MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator) { MPI_Comm new_communicator; - MPI_Comm_dup (mpi_communicator, &new_communicator); + const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator); + AssertThrowMPI(ierr); return new_communicator; } @@ -142,9 +145,10 @@ namespace Utilities // processors in this case, which is more expensive than the reduction // operation above in MPI_Allreduce) std::vector all_destinations (max_n_destinations * n_procs); - MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED, - &all_destinations[0], max_n_destinations, MPI_UNSIGNED, - mpi_comm); + const int ierr = MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED, + &all_destinations[0], max_n_destinations, MPI_UNSIGNED, + mpi_comm); + AssertThrowMPI(ierr); // now we know who is going to communicate with whom. collect who is // going to communicate with us! @@ -236,7 +240,7 @@ namespace Utilities MPI_Op op; int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op); - AssertThrow(ierr == MPI_SUCCESS, ExcInternalError()); + AssertThrowMPI(ierr); MinMaxAvg in; in.sum = in.min = in.max = my_value; @@ -248,18 +252,18 @@ namespace Utilities MPI_Datatype types[]= {MPI_DOUBLE, MPI_INT}; ierr = MPI_Type_struct(2, lengths, displacements, types, &type); - AssertThrow(ierr == MPI_SUCCESS, ExcInternalError()); + AssertThrowMPI(ierr); ierr = MPI_Type_commit(&type); - AssertThrow(ierr == MPI_SUCCESS, ExcInternalError()); + AssertThrowMPI(ierr); ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator); - AssertThrow(ierr == MPI_SUCCESS, ExcInternalError()); + AssertThrowMPI(ierr); ierr = MPI_Type_free (&type); - AssertThrow(ierr == MPI_SUCCESS, ExcInternalError()); + AssertThrowMPI(ierr); ierr = MPI_Op_free(&op); - AssertThrow(ierr == MPI_SUCCESS, ExcInternalError()); + AssertThrowMPI(ierr); result.avg = result.sum / numproc; @@ -324,19 +328,19 @@ namespace Utilities // if we have PETSc, we will initialize it and let it handle MPI. // Otherwise, we will do it. int MPI_has_been_started = 0; - MPI_Initialized(&MPI_has_been_started); + int ierr = MPI_Initialized(&MPI_has_been_started); + AssertThrowMPI(ierr); AssertThrow (MPI_has_been_started == 0, ExcMessage ("MPI error. You can only start MPI once!")); - int mpi_err, provided; - // this works like mpi_err = MPI_Init (&argc, &argv); but tells MPI that + int provided; + // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that // we might use several threads but never call two MPI functions at the // same time. 
For an explanation of why we do this, see // http://www.open-mpi.org/community/lists/users/2010/03/12244.php int wanted = MPI_THREAD_SERIALIZED; - mpi_err = MPI_Init_thread(&argc, &argv, wanted, &provided); - AssertThrow (mpi_err == 0, - ExcMessage ("MPI could not be initialized.")); + ierr = MPI_Init_thread(&argc, &argv, wanted, &provided); + AssertThrowMPI(ierr); // disable for now because at least some implementations always return // MPI_THREAD_SINGLE. @@ -397,9 +401,10 @@ namespace Utilities std::vector<char> all_hostnames(max_hostname_size * MPI::n_mpi_processes(MPI_COMM_WORLD)); - MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR, - &all_hostnames[0], max_hostname_size, MPI_CHAR, - MPI_COMM_WORLD); + const int ierr = MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR, + &all_hostnames[0], max_hostname_size, MPI_CHAR, + MPI_COMM_WORLD); + AssertThrowMPI(ierr); // search how often our own hostname appears and which // instance of it the current process represents @@ -517,9 +522,8 @@ namespace Utilities } else { - const int mpi_err = MPI_Finalize(); - AssertThrow (mpi_err == 0, - ExcMessage ("An error occurred while calling MPI_Finalize()")); + const int ierr = MPI_Finalize(); + AssertThrowMPI(ierr); } } #endif @@ -531,7 +535,8 @@ namespace Utilities { #ifdef DEAL_II_WITH_MPI int MPI_has_been_started = 0; - MPI_Initialized(&MPI_has_been_started); + const int ierr = MPI_Initialized(&MPI_has_been_started); + AssertThrowMPI(ierr); return (MPI_has_been_started > 0); #else diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc index abeae873e4..05ba75a923 100644 --- a/source/base/partitioner.cc +++ b/source/base/partitioner.cc @@ -183,13 +183,15 @@ namespace Utilities // Allow non-zero start index for the vector.
send this data to all // processors first_index[0] = local_range_data.first; - MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE, - 0, communicator); + int ierr = MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE, + 0, communicator); + AssertThrowMPI(ierr); // Get the end-of-local_range for all processors - MPI_Allgather(&local_range_data.second, 1, - DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1, - DEAL_II_DOF_INDEX_MPI_TYPE, communicator); + ierr = MPI_Allgather(&local_range_data.second, 1, + DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1, + DEAL_II_DOF_INDEX_MPI_TYPE, communicator); + AssertThrowMPI(ierr); first_index[n_procs] = global_size; // fix case when there are some processors without any locally owned @@ -261,8 +263,9 @@ namespace Utilities for (unsigned int i=0; i > import_targets_temp; @@ -285,11 +288,13 @@ namespace Utilities std::vector import_requests (import_targets_data.size()); for (unsigned int i=0; i0) - MPI_Waitall (import_requests.size(), &import_requests[0], - MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall (import_requests.size(), + &import_requests[0], + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } // transform import indices to local index space and compress // contiguous indices in form of ranges @@ -363,8 +373,9 @@ namespace Utilities if (Utilities::MPI::job_supports_mpi()) { int communicators_same = 0; - MPI_Comm_compare (part.communicator, communicator, - &communicators_same); + const int ierr = MPI_Comm_compare (part.communicator, communicator, + &communicators_same); + AssertThrowMPI(ierr); if (!(communicators_same == MPI_IDENT || communicators_same == MPI_CONGRUENT)) return false; diff --git a/source/base/timer.cc b/source/base/timer.cc index a9f0f7e401..91a92d3fa8 100644 --- a/source/base/timer.cc +++ b/source/base/timer.cc @@ -116,7 +116,10 @@ void Timer::start () #ifdef DEAL_II_WITH_MPI if (sync_wall_time) - MPI_Barrier(mpi_communicator); + { + const int ierr = MPI_Barrier(mpi_communicator); + AssertThrowMPI(ierr); + } #endif #if defined(DEAL_II_HAVE_SYS_TIME_H) && defined(DEAL_II_HAVE_SYS_RESOURCE_H) diff --git a/source/base/utilities.cc b/source/base/utilities.cc index 66055ba1ab..c361bde0f1 100644 --- a/source/base/utilities.cc +++ b/source/base/utilities.cc @@ -784,7 +784,8 @@ namespace Utilities { MPI_Comm comm = mpi_comm->GetMpiComm(); *mpi_comm = Epetra_MpiComm(MPI_COMM_SELF); - MPI_Comm_free (&comm); + const int ierr = MPI_Comm_free (&comm); + AssertThrowMPI(ierr); } #endif } diff --git a/source/distributed/grid_refinement.cc b/source/distributed/grid_refinement.cc index 32fe0de9dc..3c43d26ce8 100644 --- a/source/distributed/grid_refinement.cc +++ b/source/distributed/grid_refinement.cc @@ -97,8 +97,9 @@ namespace // compute the minimum on // processor zero - MPI_Reduce (comp, result, 2, MPI_DOUBLE, - MPI_MIN, 0, mpi_communicator); + const int ierr = MPI_Reduce (comp, result, 2, MPI_DOUBLE, + MPI_MIN, 0, mpi_communicator); + AssertThrowMPI(ierr); // make sure only processor zero // got something @@ -131,8 +132,9 @@ namespace double result = 0; // compute the minimum on // processor zero - MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE, - MPI_SUM, 0, mpi_communicator); + const int ierr = MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE, + MPI_SUM, 0, mpi_communicator); + AssertThrowMPI(ierr); // make sure only processor zero // got something @@ -274,8 +276,9 @@ namespace do { - MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE, - master_mpi_rank, mpi_communicator); + int ierr = MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE, + 
master_mpi_rank, mpi_communicator); + AssertThrowMPI(ierr); if (interesting_range[0] == interesting_range[1]) return interesting_range[0]; @@ -300,8 +303,9 @@ test_threshold)); unsigned int total_count; - MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED, - MPI_SUM, master_mpi_rank, mpi_communicator); + ierr = MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED, + MPI_SUM, master_mpi_rank, mpi_communicator); + AssertThrowMPI(ierr); // now adjust the range. if // we have too many cells, we @@ -369,8 +373,9 @@ do { - MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE, - master_mpi_rank, mpi_communicator); + int ierr = MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE, + master_mpi_rank, mpi_communicator); + AssertThrowMPI(ierr); if (interesting_range[0] == interesting_range[1]) { @@ -384,8 +389,9 @@ // actual largest value double final_threshold = std::min (interesting_range[0], global_min_and_max.second); - MPI_Bcast (&final_threshold, 1, MPI_DOUBLE, - master_mpi_rank, mpi_communicator); + ierr = MPI_Bcast (&final_threshold, 1, MPI_DOUBLE, + master_mpi_rank, mpi_communicator); + AssertThrowMPI(ierr); return final_threshold; } @@ -406,8 +412,9 @@ my_error += criteria(i); double total_error; - MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE, - MPI_SUM, master_mpi_rank, mpi_communicator); + ierr = MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE, + MPI_SUM, master_mpi_rank, mpi_communicator); + AssertThrowMPI(ierr); // now adjust the range. if we have too many cells, we take // the upper half of the previous range, otherwise the lower diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index f277ad2cce..ca1f054121 100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -1927,8 +1927,8 @@ namespace parallel // Check that level_ghost_owners is symmetric by sending a message // to everyone { - - MPI_Barrier(this->mpi_communicator); + int ierr = MPI_Barrier(this->mpi_communicator); + AssertThrowMPI(ierr); // important: preallocate to avoid (re)allocation: std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size()); @@ -1942,9 +1942,10 @@ namespace parallel Assert (typeid(types::subdomain_id) == typeid(unsigned int), ExcNotImplemented()); - MPI_Isend(&dummy, 1, MPI_UNSIGNED, - *it, 9001, this->mpi_communicator, - &requests[req_counter]); + ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED, + *it, 9001, this->mpi_communicator, + &requests[req_counter]); + AssertThrowMPI(ierr); } for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin(); @@ -1955,15 +1956,20 @@ namespace parallel == typeid(unsigned int), ExcNotImplemented()); unsigned int dummy; - MPI_Recv(&dummy, 1, MPI_UNSIGNED, - *it, 9001, this->mpi_communicator, - MPI_STATUS_IGNORE); + ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED, + *it, 9001, this->mpi_communicator, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); } if (requests.size() > 0) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } - MPI_Barrier(this->mpi_communicator); + ierr = MPI_Barrier(this->mpi_communicator); + AssertThrowMPI(ierr); } #endif @@ -3198,9 +3204,10 @@ namespace parallel // that the packet has been // received it->second.pack_data (*buffer); - MPI_Isend(&(*buffer)[0], buffer->size(), - MPI_BYTE, it->first, - 123, this->get_communicator(), &requests[idx]); + const int ierr = MPI_Isend(&(*buffer)[0], buffer->size(), + MPI_BYTE, it->first, + 123,
this->get_communicator(), &requests[idx]); + AssertThrowMPI(ierr); } Assert(destinations.size()==needs_to_get_cells.size(), ExcInternalError()); @@ -3218,13 +3225,16 @@ namespace parallel { MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status); - MPI_Get_count(&status, MPI_BYTE, &len); + int ierr = MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); receive.resize(len); char *ptr = &receive[0]; - MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - this->get_communicator(), &status); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + this->get_communicator(), &status); + AssertThrowMPI(ierr); cellinfo.unpack_data(receive); const unsigned int cells = cellinfo.tree_index.size(); @@ -3250,7 +3260,10 @@ namespace parallel // complete all sends, so that we can // safely destroy the buffers. if (requests.size() > 0) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } //check all msgs got sent and received Assert(Utilities::MPI::sum(needs_to_get_cells.size(), this->get_communicator()) diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc index af05030c33..46e4f94af7 100644 --- a/source/distributed/tria_base.cc +++ b/source/distributed/tria_base.cc @@ -197,13 +197,14 @@ namespace parallel unsigned int send_value = number_cache.n_locally_owned_active_cells[my_subdomain]; - MPI_Allgather (&send_value, - 1, - MPI_UNSIGNED, - &number_cache.n_locally_owned_active_cells[0], - 1, - MPI_UNSIGNED, - this->mpi_communicator); + const int ierr = MPI_Allgather (&send_value, + 1, + MPI_UNSIGNED, + &number_cache.n_locally_owned_active_cells[0], + 1, + MPI_UNSIGNED, + this->mpi_communicator); + AssertThrowMPI(ierr); number_cache.n_global_active_cells = std::accumulate (number_cache.n_locally_owned_active_cells.begin(), diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index a2004fd2f0..c9ad02a385 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -1160,9 +1160,10 @@ namespace internal types::global_dof_index shift = 0; //set rcounts based on new_numbers: int cur_count = new_numbers_copy.size (); - MPI_Allgather (&cur_count, 1, MPI_INT, - &rcounts[0], 1, MPI_INT, - tr->get_communicator ()); + int ierr = MPI_Allgather (&cur_count, 1, MPI_INT, + &rcounts[0], 1, MPI_INT, + tr->get_communicator ()); + AssertThrowMPI(ierr); for (unsigned int i = 0; i < n_cpu; i++) { @@ -1172,12 +1173,13 @@ namespace internal Assert(((int)new_numbers_copy.size()) == rcounts[Utilities::MPI::this_mpi_process (tr->get_communicator ())], ExcInternalError()); - MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (), - DEAL_II_DOF_INDEX_MPI_TYPE, - &gathered_new_numbers[0], &rcounts[0], - &displs[0], - DEAL_II_DOF_INDEX_MPI_TYPE, - tr->get_communicator ()); + ierr = MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (), + DEAL_II_DOF_INDEX_MPI_TYPE, + &gathered_new_numbers[0], &rcounts[0], + &displs[0], + DEAL_II_DOF_INDEX_MPI_TYPE, + tr->get_communicator ()); + AssertThrowMPI(ierr); } // put new numbers according to the current locally_owned_dofs_per_processor IndexSets @@ -1641,9 +1643,10 @@ namespace internal // that the packet has been // received it->second.pack_data (sendbuffers[idx]); - 
MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(), - MPI_BYTE, it->first, - 1100101, tria.get_communicator(), &requests[idx]); + const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(), + MPI_BYTE, it->first, + 1100101, tria.get_communicator(), &requests[idx]); + AssertThrowMPI(ierr); } //* receive requests and reply @@ -1657,13 +1660,16 @@ namespace internal MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status); - MPI_Get_count(&status, MPI_BYTE, &len); + int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); receive.resize(len); char *ptr = &receive[0]; - MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria.get_communicator(), &status); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria.get_communicator(), &status); + AssertThrowMPI(ierr); cellinfo.unpack_data(receive); @@ -1688,9 +1694,10 @@ namespace internal //send reply cellinfo.pack_data(reply_buffers[idx]); - MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(), - MPI_BYTE, status.MPI_SOURCE, - 1100102, tria.get_communicator(), &reply_requests[idx]); + ierr = MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(), + MPI_BYTE, status.MPI_SOURCE, + 1100102, tria.get_communicator(), &reply_requests[idx]); + AssertThrowMPI(ierr); } // * finally receive the replies @@ -1701,13 +1708,16 @@ namespace internal MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status); - MPI_Get_count(&status, MPI_BYTE, &len); + int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); receive.resize(len); char *ptr = &receive[0]; - MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria.get_communicator(), &status); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria.get_communicator(), &status); + AssertThrowMPI(ierr); cellinfo.unpack_data(receive); if (cellinfo.tree_index.size()==0) @@ -1739,9 +1749,15 @@ namespace internal // complete all sends, so that we can // safely destroy the buffers. 
if (requests.size() > 0) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } if (reply_requests.size() > 0) - MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } } @@ -1909,9 +1925,10 @@ namespace internal // that the packet has been // received it->second.pack_data (*buffer); - MPI_Isend(&(*buffer)[0], buffer->size(), - MPI_BYTE, it->first, - 123, tr->get_communicator(), &requests[idx]); + const int ierr = MPI_Isend(&(*buffer)[0], buffer->size(), + MPI_BYTE, it->first, + 123, tr->get_communicator(), &requests[idx]); + AssertThrowMPI(ierr); } @@ -1955,13 +1972,16 @@ namespace internal { MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status); - MPI_Get_count(&status, MPI_BYTE, &len); + int ierr = MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); receive.resize(len); char *ptr = &receive[0]; - MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tr->get_communicator(), &status); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tr->get_communicator(), &status); + AssertThrowMPI(ierr); typename types::cellinfo cellinfo; cellinfo.unpack_data(receive); @@ -1994,7 +2014,10 @@ namespace internal // complete all sends, so that we can // safely destroy the buffers. if (requests.size() > 0) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } #ifdef DEBUG @@ -2005,8 +2028,10 @@ namespace internal unsigned int sent=needs_to_get_cells.size(); unsigned int recv=senders.size(); - MPI_Allreduce(&sent, &sum_send, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator()); - MPI_Allreduce(&recv, &sum_recv, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator()); + int ierr = MPI_Allreduce(&sent, &sum_send, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator()); + AssertThrowMPI(ierr); + ierr = MPI_Allreduce(&recv, &sum_recv, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator()); + AssertThrowMPI(ierr); Assert(sum_send==sum_recv, ExcInternalError()); } #endif @@ -2037,7 +2062,8 @@ namespace internal // processors from which we expect // messages, and by using different // tags for phase 1 and 2 - MPI_Barrier(tr->get_communicator()); + const int ierr = MPI_Barrier(tr->get_communicator()); + AssertThrowMPI(ierr); #endif } @@ -2132,11 +2158,12 @@ namespace internal //shift ids to make them unique number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus); - MPI_Allgather ( &number_cache.n_locally_owned_dofs, - 1, DEAL_II_DOF_INDEX_MPI_TYPE, - &number_cache.n_locally_owned_dofs_per_processor[0], - 1, DEAL_II_DOF_INDEX_MPI_TYPE, - tr->get_communicator()); + const int ierr = MPI_Allgather ( &number_cache.n_locally_owned_dofs, + 1, DEAL_II_DOF_INDEX_MPI_TYPE, + &number_cache.n_locally_owned_dofs_per_processor[0], + 1, DEAL_II_DOF_INDEX_MPI_TYPE, + tr->get_communicator()); + AssertThrowMPI(ierr); const dealii::types::global_dof_index shift = std::accumulate (number_cache @@ -2365,11 +2392,12 @@ namespace internal //shift ids to make them unique number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus); - MPI_Allgather ( 
&number_cache.n_locally_owned_dofs, - 1, DEAL_II_DOF_INDEX_MPI_TYPE, - &number_cache.n_locally_owned_dofs_per_processor[0], - 1, DEAL_II_DOF_INDEX_MPI_TYPE, - tr->get_communicator()); + int ierr = MPI_Allgather ( &number_cache.n_locally_owned_dofs, + 1, DEAL_II_DOF_INDEX_MPI_TYPE, + &number_cache.n_locally_owned_dofs_per_processor[0], + 1, DEAL_II_DOF_INDEX_MPI_TYPE, + tr->get_communicator()); + AssertThrowMPI(ierr); const dealii::types::global_dof_index shift = std::accumulate (number_cache @@ -2466,7 +2494,8 @@ namespace internal // This barrier is crucial so that messages between phase 1&2 don't // mix. - MPI_Barrier(tr->get_communicator()); + const int ierr = MPI_Barrier(tr->get_communicator()); + AssertThrowMPI(ierr); // Phase 2, only request the cells that were not completed in Phase // 1. @@ -2724,9 +2753,10 @@ namespace internal my_data.resize(max_size); std::vector buffer(max_size*n_cpus); - MPI_Allgather(&my_data[0], max_size, MPI_BYTE, - &buffer[0], max_size, MPI_BYTE, - tr->get_communicator()); + const int ierr = MPI_Allgather(&my_data[0], max_size, MPI_BYTE, + &buffer[0], max_size, MPI_BYTE, + tr->get_communicator()); + AssertThrowMPI(ierr); number_cache.locally_owned_dofs_per_processor.resize (n_cpus); number_cache.n_locally_owned_dofs_per_processor.resize (n_cpus); diff --git a/source/dofs/dof_renumbering.cc b/source/dofs/dof_renumbering.cc index cd1af2a3a8..2076f9a8bb 100644 --- a/source/dofs/dof_renumbering.cc +++ b/source/dofs/dof_renumbering.cc @@ -770,11 +770,12 @@ namespace DoFRenumbering all_dof_counts(fe_collection.n_components() * Utilities::MPI::n_mpi_processes (tria->get_communicator())); - MPI_Allgather ( &local_dof_count[0], - n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, - &all_dof_counts[0], - n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, - tria->get_communicator()); + const int ierr = MPI_Allgather ( &local_dof_count[0], + n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, + &all_dof_counts[0], + n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, + tria->get_communicator()); + AssertThrowMPI(ierr); for (unsigned int i=0; ilocally_owned_subdomain()+i] @@ -1057,11 +1058,12 @@ namespace DoFRenumbering all_dof_counts(fe_collection.n_components() * Utilities::MPI::n_mpi_processes (tria->get_communicator())); - MPI_Allgather ( &local_dof_count[0], - n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, - &all_dof_counts[0], - n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, - tria->get_communicator()); + const int ierr = MPI_Allgather ( &local_dof_count[0], + n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, + &all_dof_counts[0], + n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE, + tria->get_communicator()); + AssertThrowMPI(ierr); for (unsigned int i=0; ilocally_owned_subdomain()+i] diff --git a/source/dofs/dof_tools.cc b/source/dofs/dof_tools.cc index 90e1a0b00a..3475ddf8da 100644 --- a/source/dofs/dof_tools.cc +++ b/source/dofs/dof_tools.cc @@ -1704,9 +1704,10 @@ namespace DoFTools { std::vector local_dof_count = dofs_per_component; - MPI_Allreduce ( &local_dof_count[0], &dofs_per_component[0], n_target_components, - DEAL_II_DOF_INDEX_MPI_TYPE, - MPI_SUM, tria->get_communicator()); + const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_component[0], n_target_components, + DEAL_II_DOF_INDEX_MPI_TYPE, + MPI_SUM, tria->get_communicator()); + AssertThrowMPI (ierr); } #endif } @@ -1781,10 +1782,11 @@ namespace DoFTools (&dof_handler.get_triangulation()))) { std::vector local_dof_count = dofs_per_block; - MPI_Allreduce ( &local_dof_count[0], &dofs_per_block[0], - n_target_blocks, - DEAL_II_DOF_INDEX_MPI_TYPE, - MPI_SUM, 
tria->get_communicator()); + const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_block[0], + n_target_blocks, + DEAL_II_DOF_INDEX_MPI_TYPE, + MPI_SUM, tria->get_communicator()); + AssertThrowMPI (ierr); } #endif } diff --git a/source/fe/fe_tools_extrapolate.cc b/source/fe/fe_tools_extrapolate.cc index 2e4a1163d5..f8664dd84e 100755 --- a/source/fe/fe_tools_extrapolate.cc +++ b/source/fe/fe_tools_extrapolate.cc @@ -1086,12 +1086,13 @@ namespace FETools destinations.push_back (it->receiver); it->pack_data (*buffer); - MPI_Isend (&(*buffer)[0], buffer->size(), - MPI_BYTE, - it->receiver, - round, - communicator, - &requests[idx]); + const int ierr = MPI_Isend (&(*buffer)[0], buffer->size(), + MPI_BYTE, + it->receiver, + round, + communicator, + &requests[idx]); + AssertThrowMPI(ierr); } Assert(destinations.size()==cells_to_send.size(), ExcInternalError()); @@ -1106,12 +1107,15 @@ namespace FETools { MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status); - MPI_Get_count(&status, MPI_BYTE, &len); + int ierr = MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); receive.resize (len); char *buf = &receive[0]; - MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status); + ierr = MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status); + AssertThrowMPI(ierr); cell_data.unpack_data (receive); @@ -1124,7 +1128,10 @@ namespace FETools } if (requests.size () > 0) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } // finally sort the list of cells std::sort (received_cells.begin (), received_cells.end ()); diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc index d75ec11de1..5d667876a6 100644 --- a/source/grid/grid_tools.cc +++ b/source/grid/grid_tools.cc @@ -1800,8 +1800,9 @@ next_cell: // processors and shifting the indices accordingly const unsigned int n_cpu = Utilities::MPI::n_mpi_processes(triangulation.get_communicator()); std::vector indices(n_cpu); - MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0], - indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator()); + int ierr = MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0], + indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator()); + AssertThrowMPI(ierr); const types::global_vertex_index shift = std::accumulate(&indices[0], &indices[0]+triangulation.locally_owned_subdomain(),0); @@ -1841,8 +1842,9 @@ next_cell: } // Send the message - MPI_Isend(&vertices_send_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE, - destination, 0, triangulation.get_communicator(), &first_requests[i]); + ierr = MPI_Isend(&vertices_send_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE, + destination, 0, triangulation.get_communicator(), &first_requests[i]); + AssertThrowMPI(ierr); } // Receive the first message @@ -1859,8 +1861,9 @@ next_cell: vertices_recv_buffers[i].resize(buffer_size); // Receive the message - MPI_Recv(&vertices_recv_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE, - source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE); + ierr = MPI_Recv(&vertices_recv_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE, + source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE); + 
AssertThrowMPI(ierr); } @@ -1893,8 +1896,9 @@ next_cell: } // Send the message - MPI_Isend(&cellids_send_buffers[i][0], buffer_size, MPI_CHAR, - destination, 0, triangulation.get_communicator(), &second_requests[i]); + ierr = MPI_Isend(&cellids_send_buffers[i][0], buffer_size, MPI_CHAR, + destination, 0, triangulation.get_communicator(), &second_requests[i]); + AssertThrowMPI(ierr); } // Receive the second message @@ -1908,8 +1912,9 @@ next_cell: cellids_recv_buffers[i].resize(buffer_size); // Receive the message - MPI_Recv(&cellids_recv_buffers[i][0],buffer_size, MPI_CHAR, - source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE); + ierr = MPI_Recv(&cellids_recv_buffers[i][0],buffer_size, MPI_CHAR, + source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); } diff --git a/source/lac/petsc_matrix_base.cc b/source/lac/petsc_matrix_base.cc index 350cd54842..76f59ade32 100644 --- a/source/lac/petsc_matrix_base.cc +++ b/source/lac/petsc_matrix_base.cc @@ -207,6 +207,8 @@ namespace PETScWrappers void MatrixBase::compress (const VectorOperation::values operation) { + int ierr; + (void)ierr; #ifdef DEBUG #ifdef DEAL_II_WITH_MPI // Check that all processors agree that last_action is the same (or none!) @@ -214,8 +216,9 @@ namespace PETScWrappers int my_int_last_action = last_action; int all_int_last_action; - MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT, - MPI_BOR, get_mpi_communicator()); + ierr = MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT, + MPI_BOR, get_mpi_communicator()); + AssertThrowMPI(ierr); AssertThrow(all_int_last_action != (VectorOperation::add | VectorOperation::insert), ExcMessage("Error: not all processors agree on the last VectorOperation before this compress() call.")); @@ -227,7 +230,6 @@ namespace PETScWrappers ExcMessage("Missing compress() or calling with wrong VectorOperation argument.")); // flush buffers - int ierr; ierr = MatAssemblyBegin (matrix,MAT_FINAL_ASSEMBLY); AssertThrow (ierr == 0, ExcPETScError(ierr)); diff --git a/source/lac/petsc_parallel_vector.cc b/source/lac/petsc_parallel_vector.cc index c1364370f3..50bc5f57f6 100644 --- a/source/lac/petsc_parallel_vector.cc +++ b/source/lac/petsc_parallel_vector.cc @@ -121,8 +121,9 @@ namespace PETScWrappers // mismatch (may not be true for every proc) int k_global, k = ((size() != n) || (local_size() != local_sz)); - MPI_Allreduce (&k, &k_global, 1, - MPI_INT, MPI_LOR, communicator); + int ierr = MPI_Allreduce (&k, &k_global, 1, + MPI_INT, MPI_LOR, communicator); + AssertThrowMPI(ierr); if (k_global || has_ghost_elements()) { @@ -134,7 +135,6 @@ namespace PETScWrappers // AssertThrow (ierr == 0, ExcPETScError(ierr)); // so let's go the slow way: - int ierr; #if DEAL_II_PETSC_VERSION_LT(3,2,0) ierr = VecDestroy (vector); @@ -413,7 +413,8 @@ namespace PETScWrappers i++) { // This is slow, but most likely only used to debug. - MPI_Barrier(communicator); + ierr = MPI_Barrier(communicator); + AssertThrowMPI(ierr); if (i == Utilities::MPI::this_mpi_process(communicator)) { if (across) diff --git a/source/lac/petsc_vector_base.cc b/source/lac/petsc_vector_base.cc index febffa9c45..4d36f0e414 100644 --- a/source/lac/petsc_vector_base.cc +++ b/source/lac/petsc_vector_base.cc @@ -405,6 +405,8 @@ namespace PETScWrappers void VectorBase::compress (const VectorOperation::values operation) { + int ierr; + (void)ierr; #ifdef DEBUG #ifdef DEAL_II_WITH_MPI // Check that all processors agree that last_action is the same (or none!) 
@@ -412,8 +414,9 @@ namespace PETScWrappers int my_int_last_action = last_action; int all_int_last_action; - MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT, - MPI_BOR, get_mpi_communicator()); + ierr = MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT, + MPI_BOR, get_mpi_communicator()); + AssertThrowMPI(ierr); AssertThrow(all_int_last_action != (::dealii::VectorOperation::add | ::dealii::VectorOperation::insert), ExcMessage("Error: not all processors agree on the last VectorOperation before this compress() call.")); @@ -438,7 +441,6 @@ namespace PETScWrappers // we still need to call // VecAssemblyBegin/End on all // processors. - int ierr; ierr = VecAssemblyBegin(vector); AssertThrow (ierr == 0, ExcPETScError(ierr)); ierr = VecAssemblyEnd(vector); diff --git a/source/lac/sparsity_tools.cc b/source/lac/sparsity_tools.cc index 850e3501d1..f57b2cfd2e 100644 --- a/source/lac/sparsity_tools.cc +++ b/source/lac/sparsity_tools.cc @@ -595,13 +595,16 @@ namespace SparsityTools { unsigned int idx=0; for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx) - MPI_Isend(&(it->second[0]), - it->second.size(), - DEAL_II_DOF_INDEX_MPI_TYPE, - it->first, - 124, - mpi_comm, - &requests[idx]); + { + const int ierr = MPI_Isend(&(it->second[0]), + it->second.size(), + DEAL_II_DOF_INDEX_MPI_TYPE, + it->first, + 124, + mpi_comm, + &requests[idx]); + AssertThrowMPI(ierr); + } } { @@ -611,13 +614,16 @@ namespace SparsityTools { MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status); + int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status); + AssertThrowMPI(ierr); Assert (status.MPI_TAG==124, ExcInternalError()); - MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len); + ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len); + AssertThrowMPI(ierr); recv_buf.resize(len); - MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE, - status.MPI_TAG, mpi_comm, &status); + ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE, + status.MPI_TAG, mpi_comm, &status); + AssertThrowMPI(ierr); std::vector::const_iterator ptr = recv_buf.begin(); std::vector::const_iterator end = recv_buf.end(); @@ -639,7 +645,10 @@ namespace SparsityTools // complete all sends, so that we can safely destroy the buffers. 
if (requests.size()) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } } @@ -717,13 +726,16 @@ namespace SparsityTools { unsigned int idx=0; for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx) - MPI_Isend(&(it->second[0]), - it->second.size(), - DEAL_II_DOF_INDEX_MPI_TYPE, - it->first, - 124, - mpi_comm, - &requests[idx]); + { + const int ierr = MPI_Isend(&(it->second[0]), + it->second.size(), + DEAL_II_DOF_INDEX_MPI_TYPE, + it->first, + 124, + mpi_comm, + &requests[idx]); + AssertThrowMPI(ierr); + } } { @@ -733,13 +745,16 @@ namespace SparsityTools { MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status); + int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status); + AssertThrowMPI(ierr); Assert (status.MPI_TAG==124, ExcInternalError()); - MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len); + ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len); + AssertThrowMPI(ierr); recv_buf.resize(len); - MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE, - status.MPI_TAG, mpi_comm, &status); + ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE, + status.MPI_TAG, mpi_comm, &status); + AssertThrowMPI(ierr); std::vector::const_iterator ptr = recv_buf.begin(); std::vector::const_iterator end = recv_buf.end(); @@ -761,7 +776,10 @@ namespace SparsityTools // complete all sends, so that we can safely destroy the buffers. if (requests.size()) - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + { + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } } #endif } diff --git a/source/multigrid/mg_level_global_transfer.cc b/source/multigrid/mg_level_global_transfer.cc index 1002f233ce..f5adec44bc 100644 --- a/source/multigrid/mg_level_global_transfer.cc +++ b/source/multigrid/mg_level_global_transfer.cc @@ -210,9 +210,18 @@ namespace // the receiving end will be waiting. In that case we just send // an empty message.
if (data.size()) - MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin()); + { + const int ierr = MPI_Isend(&data[0], data.size()*sizeof(data[0]), + MPI_BYTE, dest, 71, tria->get_communicator(), + &*requests.rbegin()); + AssertThrowMPI(ierr); + } else - MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin()); + { + const int ierr = MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, + tria->get_communicator(), &*requests.rbegin()); + AssertThrowMPI(ierr); + } } } @@ -224,14 +233,16 @@ namespace { MPI_Status status; int len; - MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status); - MPI_Get_count(&status, MPI_BYTE, &len); + int ierr = MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); if (len==0) { - int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria->get_communicator(), &status); - AssertThrow(err==MPI_SUCCESS, ExcInternalError()); + ierr = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria->get_communicator(), &status); + AssertThrowMPI(ierr); continue; } @@ -240,9 +251,9 @@ namespace receive_buffer.resize(count); void *ptr = &receive_buffer[0]; - int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria->get_communicator(), &status); - AssertThrow(err==MPI_SUCCESS, ExcInternalError()); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria->get_communicator(), &status); + AssertThrowMPI(ierr); for (unsigned int i=0; i 0) { - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); requests.clear(); } #ifdef DEBUG // Make sure in debug mode, that everybody sent/received all packages // on this level. If a deadlock occurs here, the list of expected // senders is not computed correctly. - MPI_Barrier(tria->get_communicator()); + const int ierr = MPI_Barrier(tria->get_communicator()); + AssertThrowMPI(ierr); #endif } #endif -- 2.39.5
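The pattern applied at every call site above, shown once in isolation as a minimal sketch (not itself part of the patch): it assumes a deal.II build with MPI enabled, uses MPI_Barrier as a stand-in for any MPI function, and the function name checked_barrier is illustrative only. AssertThrowMPI, from deal.II's exceptions header, raises an exception describing the MPI error code whenever its argument is not MPI_SUCCESS.

    #include <deal.II/base/exceptions.h>
    #include <mpi.h>

    void checked_barrier (const MPI_Comm communicator)
    {
      // Capture the return code of the MPI call instead of discarding it...
      const int ierr = MPI_Barrier (communicator);
      // ...and convert any failure into an exception with a helpful message.
      AssertThrowMPI (ierr);
    }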