From: David Wells
Date: Mon, 29 Mar 2021 17:07:08 +0000 (-0400)
Subject: Add some more AssertThrowMPIs.
X-Git-Tag: v9.3.0-rc1~269^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F11979%2Fhead;p=dealii.git

Add some more AssertThrowMPIs.
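
The error code returned by each of these raw MPI calls was previously
discarded. This patch applies deal.II's usual checking pattern to them.
As a minimal sketch of that pattern (MPI_Barrier stands in here for any
of the routines touched below, and "comm" for whatever communicator is
at hand):

    // Capture the error code instead of discarding it ...
    const int ierr = MPI_Barrier(comm);
    // ... and convert any failure into an exception: AssertThrowMPI
    // throws ExcMPI(ierr) whenever ierr != MPI_SUCCESS.
    AssertThrowMPI(ierr);

Where a checked call forms the single-statement body of a loop (the
MPI_Isend/MPI_Irecv loops in vector_data_exchange.cc), braces are added
so that each iteration gets its own scoped ierr.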
---

diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index 619d5fa5bf..c47c1eb244 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -147,8 +147,9 @@ namespace Utilities
     const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
 
     std::vector<unsigned int> ranks(size);
-    MPI_Allgather(
+    const int ierr = MPI_Allgather(
       &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
+    AssertThrowMPI(ierr);
 
     return ranks;
   }
@@ -531,21 +532,23 @@ namespace Utilities
     std::vector<unsigned int> buffer(dest_vector.size());
     unsigned int              n_recv_from = 0;
 
-    MPI_Reduce(dest_vector.data(),
-               buffer.data(),
-               dest_vector.size(),
-               MPI_UNSIGNED,
-               MPI_SUM,
-               0,
-               mpi_comm);
-    MPI_Scatter(buffer.data(),
-                1,
-                MPI_UNSIGNED,
-                &n_recv_from,
-                1,
-                MPI_UNSIGNED,
-                0,
-                mpi_comm);
+    int ierr = MPI_Reduce(dest_vector.data(),
+                          buffer.data(),
+                          dest_vector.size(),
+                          MPI_UNSIGNED,
+                          MPI_SUM,
+                          0,
+                          mpi_comm);
+    AssertThrowMPI(ierr);
+    ierr = MPI_Scatter(buffer.data(),
+                       1,
+                       MPI_UNSIGNED,
+                       &n_recv_from,
+                       1,
+                       MPI_UNSIGNED,
+                       0,
+                       mpi_comm);
+    AssertThrowMPI(ierr);
 
     return n_recv_from;
 #  endif
diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc
index 33acfd77ba..b6639aa92f 100644
--- a/source/base/partitioner.cc
+++ b/source/base/partitioner.cc
@@ -76,12 +76,14 @@ namespace Utilities
     types::global_dof_index prefix_sum = 0;
 
 #ifdef DEAL_II_WITH_MPI
-    MPI_Exscan(&local_size,
-               &prefix_sum,
-               1,
-               Utilities::MPI::internal::mpi_type_id(&prefix_sum),
-               MPI_SUM,
-               communicator);
+    const int ierr =
+      MPI_Exscan(&local_size,
+                 &prefix_sum,
+                 1,
+                 Utilities::MPI::internal::mpi_type_id(&prefix_sum),
+                 MPI_SUM,
+                 communicator);
+    AssertThrowMPI(ierr);
 #endif
 
     local_range_data = {prefix_sum, prefix_sum + local_size};
diff --git a/source/base/timer.cc b/source/base/timer.cc
index 2f7e84ce86..61f75de4f3 100644
--- a/source/base/timer.cc
+++ b/source/base/timer.cc
@@ -889,14 +889,15 @@ TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
   std::vector<double> receive_data(my_rank == 0 ? n_ranks : 0);
   std::vector<double> result(9);
 #ifdef DEAL_II_WITH_MPI
-  MPI_Gather(&given_time,
-             1,
-             MPI_DOUBLE,
-             receive_data.data(),
-             1,
-             MPI_DOUBLE,
-             0,
-             mpi_comm);
+  int ierr = MPI_Gather(&given_time,
+                        1,
+                        MPI_DOUBLE,
+                        receive_data.data(),
+                        1,
+                        MPI_DOUBLE,
+                        0,
+                        mpi_comm);
+  AssertThrowMPI(ierr);
   if (my_rank == 0)
     {
       // fill the received data in a pair and sort; on the way, also
@@ -923,7 +924,8 @@ TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
       result[7] = data_rank[n_ranks - 1].first;
       result[8] = data_rank[n_ranks - 1].second;
     }
-  MPI_Bcast(result.data(), 9, MPI_DOUBLE, 0, mpi_comm);
+  ierr = MPI_Bcast(result.data(), 9, MPI_DOUBLE, 0, mpi_comm);
+  AssertThrowMPI(ierr);
 #endif
   out_stream << std::setw(10) << std::setprecision(4) << std::right;
   out_stream << result[0] << "s ";
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 203a1365f8..fe6e2b1337 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -430,12 +430,14 @@ namespace parallel
 
         // 2) determine the offset of each process
         types::global_cell_index cell_index = 0;
 
-        MPI_Exscan(&n_locally_owned_cells,
-                   &cell_index,
-                   1,
-                   Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
-                   MPI_SUM,
-                   this->mpi_communicator);
+        const int ierr =
+          MPI_Exscan(&n_locally_owned_cells,
+                     &cell_index,
+                     1,
+                     Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
+                     MPI_SUM,
+                     this->mpi_communicator);
+        AssertThrowMPI(ierr);
 
         // 3) give global indices to locally-owned cells and mark all other cells as
        // invalid
@@ -498,13 +500,14 @@ namespace parallel
         std::vector<types::global_cell_index> cell_index(
           this->n_global_levels(), 0);
 
-        MPI_Exscan(n_locally_owned_cells.data(),
-                   cell_index.data(),
-                   this->n_global_levels(),
-                   Utilities::MPI::internal::mpi_type_id(
-                     n_locally_owned_cells.data()),
-                   MPI_SUM,
-                   this->mpi_communicator);
+        int ierr = MPI_Exscan(n_locally_owned_cells.data(),
+                              cell_index.data(),
+                              this->n_global_levels(),
+                              Utilities::MPI::internal::mpi_type_id(
+                                n_locally_owned_cells.data()),
+                              MPI_SUM,
+                              this->mpi_communicator);
+        AssertThrowMPI(ierr);
 
         // 3) determine global number of "active" cells on each level
         std::vector<types::global_cell_index> n_cells_level(
@@ -513,11 +516,13 @@ namespace parallel
         for (unsigned int l = 0; l < this->n_global_levels(); ++l)
           n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
 
-        MPI_Bcast(n_cells_level.data(),
-                  this->n_global_levels(),
-                  Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
-                  this->n_subdomains - 1,
-                  this->mpi_communicator);
+        ierr =
+          MPI_Bcast(n_cells_level.data(),
+                    this->n_global_levels(),
+                    Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
+                    this->n_subdomains - 1,
+                    this->mpi_communicator);
+        AssertThrowMPI(ierr);
 
         // 4) give global indices to locally-owned cells on level and mark
         // all other cells as invalid
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index e9f07561cc..5a5cdbd593 100644
--- a/source/dofs/dof_handler_policy.cc
+++ b/source/dofs/dof_handler_policy.cc
@@ -4054,6 +4054,7 @@ namespace internal
                                 triangulation->get_communicator()) -
                   1,
                 triangulation->get_communicator());
+            AssertThrowMPI(ierr);
 
             // shift indices
             for (types::global_dof_index &index : renumbering)
diff --git a/source/dofs/dof_renumbering.cc b/source/dofs/dof_renumbering.cc
index f572a09eee..544bf90928 100644
--- a/source/dofs/dof_renumbering.cc
+++ b/source/dofs/dof_renumbering.cc
@@ -1384,12 +1384,13 @@ namespace DoFRenumbering
 
 #ifdef DEAL_II_WITH_MPI
         types::global_dof_index locally_owned_size =
           dof_handler.locally_owned_dofs().n_elements();
-        MPI_Exscan(&locally_owned_size,
-                   &my_starting_index,
-                   1,
-                   DEAL_II_DOF_INDEX_MPI_TYPE,
-                   MPI_SUM,
-                   tria->get_communicator());
+        const int ierr = MPI_Exscan(&locally_owned_size,
+                                    &my_starting_index,
+                                    1,
+                                    DEAL_II_DOF_INDEX_MPI_TYPE,
+                                    MPI_SUM,
+                                    tria->get_communicator());
+        AssertThrowMPI(ierr);
 #endif
       }
diff --git a/source/matrix_free/vector_data_exchange.cc b/source/matrix_free/vector_data_exchange.cc
index 2da6124f4e..7fe87252db 100644
--- a/source/matrix_free/vector_data_exchange.cc
+++ b/source/matrix_free/vector_data_exchange.cc
@@ -576,26 +576,35 @@ namespace internal
                                           sm_import_ranks.size());
 
         for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-          MPI_Isend(sm_export_data_this_indices.data() +
-                      sm_export_data_this_ptr[i],
-                    sm_export_data_this_ptr[i + 1] -
-                      sm_export_data_this_ptr[i],
-                    MPI_UNSIGNED,
-                    sm_ghost_ranks[i],
-                    4,
-                    comm_sm,
-                    requests.data() + i);
+          {
+            const int ierr = MPI_Isend(sm_export_data_this_indices.data() +
+                                         sm_export_data_this_ptr[i],
+                                       sm_export_data_this_ptr[i + 1] -
+                                         sm_export_data_this_ptr[i],
+                                       MPI_UNSIGNED,
+                                       sm_ghost_ranks[i],
+                                       4,
+                                       comm_sm,
+                                       requests.data() + i);
+            AssertThrowMPI(ierr);
+          }
 
         for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-          MPI_Irecv(sm_import_data_indices.data() + sm_import_data_ptr[i],
-                    sm_import_data_ptr[i + 1] - sm_import_data_ptr[i],
-                    MPI_UNSIGNED,
-                    sm_import_ranks[i],
-                    4,
-                    comm_sm,
-                    requests.data() + sm_ghost_ranks.size() + i);
+          {
+            const int ierr =
+              MPI_Irecv(sm_import_data_indices.data() + sm_import_data_ptr[i],
+                        sm_import_data_ptr[i + 1] - sm_import_data_ptr[i],
+                        MPI_UNSIGNED,
+                        sm_import_ranks[i],
+                        4,
+                        comm_sm,
+                        requests.data() + sm_ghost_ranks.size() + i);
+            AssertThrowMPI(ierr);
+          }
 
-        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
       }
 
       // send sm_import_data_this to sm-neighbor -> sm_export_data_indices
@@ -604,26 +613,35 @@ namespace internal
                                           sm_ghost_ranks.size());
 
         for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-          MPI_Isend(sm_import_data_this_indices.data() +
-                      sm_import_data_this_ptr[i],
-                    sm_import_data_this_ptr[i + 1] -
-                      sm_import_data_this_ptr[i],
-                    MPI_UNSIGNED,
-                    sm_import_ranks[i],
-                    2,
-                    comm_sm,
-                    requests.data() + i);
+          {
+            const int ierr = MPI_Isend(sm_import_data_this_indices.data() +
+                                         sm_import_data_this_ptr[i],
+                                       sm_import_data_this_ptr[i + 1] -
+                                         sm_import_data_this_ptr[i],
+                                       MPI_UNSIGNED,
+                                       sm_import_ranks[i],
+                                       2,
+                                       comm_sm,
+                                       requests.data() + i);
+            AssertThrowMPI(ierr);
+          }
 
         for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-          MPI_Irecv(sm_export_data_indices.data() + sm_export_data_ptr[i],
-                    sm_export_data_ptr[i + 1] - sm_export_data_ptr[i],
-                    MPI_UNSIGNED,
-                    sm_ghost_ranks[i],
-                    2,
-                    comm_sm,
-                    requests.data() + sm_import_ranks.size() + i);
+          {
+            const int ierr =
+              MPI_Irecv(sm_export_data_indices.data() + sm_export_data_ptr[i],
+                        sm_export_data_ptr[i + 1] - sm_export_data_ptr[i],
+                        MPI_UNSIGNED,
+                        sm_ghost_ranks[i],
+                        2,
+                        comm_sm,
+                        requests.data() + sm_import_ranks.size() + i);
+            AssertThrowMPI(ierr);
+          }
 
-        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
       }
 
       // store data structures and, if needed, compress them
@@ -843,23 +861,30 @@ namespace internal
         int dummy;
 
         // receive a signal that relevant sm neighbors are ready
         for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-          MPI_Irecv(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_ghost_ranks[i],
-                    communication_channel + 0,
-                    comm_sm,
-                    requests.data() + sm_import_ranks.size() + i);
+          {
+            const int ierr =
+              MPI_Irecv(&dummy,
+                        0,
+                        MPI_INT,
+                        sm_ghost_ranks[i],
+                        communication_channel + 0,
+                        comm_sm,
+                        requests.data() + sm_import_ranks.size() + i);
+            AssertThrowMPI(ierr);
+          }
 
         // signal to all relevant sm neighbors that this process is ready
         for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-          MPI_Isend(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_import_ranks[i],
-                    communication_channel + 0,
-                    comm_sm,
-                    requests.data() + i);
+          {
+            const int ierr = MPI_Isend(&dummy,
+                                       0,
+                                       MPI_INT,
+                                       sm_import_ranks[i],
+                                       communication_channel + 0,
+                                       comm_sm,
+                                       requests.data() + i);
+            AssertThrowMPI(ierr);
+          }
 
         // receive data from remote processes
         for (unsigned int i = 0; i < ghost_targets_data.size(); i++)
           {
             const unsigned int offset =
               n_ghost_indices_in_larger_set_by_remote_rank[i] -
               ghost_targets_data[i][2];
 
-            MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
-                      ghost_targets_data[i][2],
-                      Utilities::MPI::internal::mpi_type_id(buffer.data()),
-                      ghost_targets_data[i][0],
-                      communication_channel + 1,
-                      comm,
-                      requests.data() + sm_import_ranks.size() +
-                        sm_ghost_ranks.size() + i);
+            const int ierr =
+              MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
+                        ghost_targets_data[i][2],
+                        Utilities::MPI::internal::mpi_type_id(buffer.data()),
+                        ghost_targets_data[i][0],
+                        communication_channel + 1,
+                        comm,
+                        requests.data() + sm_import_ranks.size() +
+                          sm_ghost_ranks.size() + i);
+            AssertThrowMPI(ierr);
           }
 
         // send data to remote processes
         for (unsigned int i = 0; i < import_targets_data.size(); i++)
           {
                   data_this[import_indices_data.second[j].first + l];
 
             // send data away
-            MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
-                      import_targets_data[i][2],
-                      Utilities::MPI::internal::mpi_type_id(data_this.data()),
-                      import_targets_data[i][0],
-                      communication_channel + 1,
-                      comm,
-                      requests.data() + sm_import_ranks.size() +
-                        sm_ghost_ranks.size() + ghost_targets_data.size() + i);
+            const int ierr =
+              MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
+                        import_targets_data[i][2],
+                        Utilities::MPI::internal::mpi_type_id(data_this.data()),
+                        import_targets_data[i][0],
+                        communication_channel + 1,
+                        comm,
+                        requests.data() + sm_import_ranks.size() +
+                          sm_ghost_ranks.size() + ghost_targets_data.size() +
+                          i);
+            AssertThrowMPI(ierr);
           }
 #endif
       }
@@ -942,11 +972,13 @@ namespace internal
              c < sm_ghost_ranks.size() + ghost_targets_data.size();
              c++)
           {
-            int i;
-            MPI_Waitany(sm_ghost_ranks.size() + ghost_targets_data.size(),
-                        requests.data() + sm_import_ranks.size(),
-                        &i,
-                        MPI_STATUS_IGNORE);
+            int       i;
+            const int ierr =
+              MPI_Waitany(sm_ghost_ranks.size() + ghost_targets_data.size(),
+                          requests.data() + sm_import_ranks.size(),
+                          &i,
+                          MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
 
             const auto s = split(i);
             i = s.second;
@@ -1032,7 +1064,10 @@ namespace internal
               }
           }
 
-        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+
 #endif
       }
@@ -1073,22 +1108,29 @@ namespace internal
         int dummy;
 
         for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
-          MPI_Isend(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_ghost_ranks[i],
-                    communication_channel + 1,
-                    comm_sm,
-                    requests.data() + i);
+          {
+            const int ierr = MPI_Isend(&dummy,
+                                       0,
+                                       MPI_INT,
+                                       sm_ghost_ranks[i],
+                                       communication_channel + 1,
+                                       comm_sm,
+                                       requests.data() + i);
+            AssertThrowMPI(ierr);
+          }
 
         for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
-          MPI_Irecv(&dummy,
-                    0,
-                    MPI_INT,
-                    sm_import_ranks[i],
-                    communication_channel + 1,
-                    comm_sm,
-                    requests.data() + sm_ghost_ranks.size() + i);
+          {
+            const int ierr =
+              MPI_Irecv(&dummy,
+                        0,
+                        MPI_INT,
+                        sm_import_ranks[i],
+                        communication_channel + 1,
+                        comm_sm,
+                        requests.data() + sm_ghost_ranks.size() + i);
+            AssertThrowMPI(ierr);
+          }
 
         for (unsigned int i = 0; i < ghost_targets_data.size(); i++)
           {
@@ -1128,26 +1170,31 @@ namespace internal
                   }
               }
 
-            MPI_Isend(buffer.data() + ghost_targets_data[i][1],
-                      ghost_targets_data[i][2],
-                      Utilities::MPI::internal::mpi_type_id(buffer.data()),
-                      ghost_targets_data[i][0],
-                      communication_channel + 0,
-                      comm,
-                      requests.data() + sm_ghost_ranks.size() +
-                        sm_import_ranks.size() + i);
+            const int ierr =
+              MPI_Isend(buffer.data() + ghost_targets_data[i][1],
+                        ghost_targets_data[i][2],
+                        Utilities::MPI::internal::mpi_type_id(buffer.data()),
+                        ghost_targets_data[i][0],
+                        communication_channel + 0,
+                        comm,
+                        requests.data() + sm_ghost_ranks.size() +
+                          sm_import_ranks.size() + i);
+            AssertThrowMPI(ierr);
           }
 
         for (unsigned int i = 0; i < import_targets_data.size(); i++)
-          MPI_Irecv(temporary_storage.data() + import_targets_data[i][1],
-                    import_targets_data[i][2],
-                    Utilities::MPI::internal::mpi_type_id(
-                      temporary_storage.data()),
-                    import_targets_data[i][0],
-                    communication_channel + 0,
-                    comm,
-                    requests.data() + sm_ghost_ranks.size() +
-                      sm_import_ranks.size() + ghost_targets_data.size() + i);
+          {
+            const int ierr = MPI_Irecv(
+              temporary_storage.data() + import_targets_data[i][1],
+              import_targets_data[i][2],
+              Utilities::MPI::internal::mpi_type_id(temporary_storage.data()),
+              import_targets_data[i][0],
+              communication_channel + 0,
+              comm,
+              requests.data() + sm_ghost_ranks.size() + sm_import_ranks.size() +
+                ghost_targets_data.size() + i);
+            AssertThrowMPI(ierr);
+          }
 #endif
       }
@@ -1202,12 +1249,14 @@ namespace internal
                ghost_targets_data.size();
              c++)
           {
-            int i;
-            MPI_Waitany(sm_import_ranks.size() + import_targets_data.size() +
-                          ghost_targets_data.size(),
-                        requests.data() + sm_ghost_ranks.size(),
-                        &i,
-                        MPI_STATUS_IGNORE);
+            int       i;
+            const int ierr =
+              MPI_Waitany(sm_import_ranks.size() + import_targets_data.size() +
+                            ghost_targets_data.size(),
+                          requests.data() + sm_ghost_ranks.size(),
+                          &i,
+                          MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
 
             const auto &s = split(i);
             i = s.second;
@@ -1268,7 +1317,9 @@ namespace internal
               }
           }
 
-        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
 #endif
       }
diff --git a/source/particles/generators.cc b/source/particles/generators.cc
index d12445fa95..60c78be8c0 100644
--- a/source/particles/generators.cc
+++ b/source/particles/generators.cc
@@ -129,12 +129,13 @@ namespace Particles
 
           // The local particle start index is the number of all particles
          // generated on lower MPI ranks.
-          MPI_Exscan(&n_particles_to_generate,
-                     &particle_index,
-                     1,
-                     DEAL_II_PARTICLE_INDEX_MPI_TYPE,
-                     MPI_SUM,
-                     tria->get_communicator());
+          const int ierr = MPI_Exscan(&n_particles_to_generate,
+                                      &particle_index,
+                                      1,
+                                      DEAL_II_PARTICLE_INDEX_MPI_TYPE,
+                                      MPI_SUM,
+                                      tria->get_communicator());
+          AssertThrowMPI(ierr);
         }
 #endif
 
@@ -295,12 +296,13 @@ namespace Particles
             dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
               &triangulation))
           {
-            MPI_Exscan(&local_weight_integral,
-                       &local_start_weight,
-                       1,
-                       MPI_DOUBLE,
-                       MPI_SUM,
-                       tria->get_communicator());
+            const int ierr = MPI_Exscan(&local_weight_integral,
+                                        &local_start_weight,
+                                        1,
+                                        MPI_DOUBLE,
+                                        MPI_SUM,
+                                        tria->get_communicator());
+            AssertThrowMPI(ierr);
           }
 #endif