From: David Wells
Date: Sun, 6 May 2018 03:46:19 +0000 (-0400)
Subject: Add some more MPI checks.
X-Git-Tag: v9.1.0-rc1~1207^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=71c148d0cbdc551785dfd62540d3fa4093c60678;p=dealii.git

Add some more MPI checks.
---

diff --git a/source/base/process_grid.cc b/source/base/process_grid.cc
index 362749dd32..9f04655539 100644
--- a/source/base/process_grid.cc
+++ b/source/base/process_grid.cc
@@ -175,8 +175,10 @@ namespace Utilities
                                      &mpi_communicator_inactive_with_root);
       AssertThrowMPI(ierr);

-      MPI_Group_free(&all_group);
-      MPI_Group_free(&inactive_with_root_group);
+      ierr = MPI_Group_free(&all_group);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_free(&inactive_with_root_group);
+      AssertThrowMPI(ierr);

       // Double check that the process with rank 0 in subgroup is active:
 #ifdef DEBUG
diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc
index 1bdc4a4401..90c431d3df 100644
--- a/source/grid/grid_tools.cc
+++ b/source/grid/grid_tools.cc
@@ -4621,9 +4621,10 @@ next_cell:
     std::vector<int> size_all_data(n_procs);

     // Exchanging the number of bboxes
-    MPI_Allgather(&n_local_data, 1, MPI_INT,
-                  &(size_all_data[0]), 1, MPI_INT,
-                  mpi_communicator);
+    int ierr = MPI_Allgather(&n_local_data, 1, MPI_INT,
+                             &(size_all_data[0]), 1, MPI_INT,
+                             mpi_communicator);
+    AssertThrowMPI(ierr);

     // Now computing the the displacement, relative to recvbuf,
     // at which to store the incoming data
@@ -4636,9 +4637,10 @@ next_cell:
     // Allocating a vector to contain all the received data
     std::vector<double> data_array(rdispls.back() + size_all_data.back());

-    MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
-                   &(data_array[0]), &(size_all_data[0]),
-                   &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+    ierr = MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
+                          &(data_array[0]), &(size_all_data[0]),
+                          &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+    AssertThrowMPI(ierr);

     // Step 4: create the array of bboxes for output
     std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc
index ae8cea0662..ec62499594 100644
--- a/source/lac/scalapack.cc
+++ b/source/lac/scalapack.cc
@@ -473,10 +473,16 @@ ScaLAPACKMatrix<NumberType>::copy_to (ScaLAPACKMatrix<NumberType> &dest) const
         Cblacs_gridexit(union_blacs_context);

       if (mpi_communicator_union != MPI_COMM_NULL)
-        MPI_Comm_free(&mpi_communicator_union);
-      MPI_Group_free(&group_source);
-      MPI_Group_free(&group_dest);
-      MPI_Group_free(&group_union);
+        {
+          ierr = MPI_Comm_free(&mpi_communicator_union);
+          AssertThrowMPI(ierr);
+        }
+      ierr = MPI_Group_free(&group_source);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_free(&group_dest);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_free(&group_union);
+      AssertThrowMPI(ierr);
     }
   else
     //process is active in the process grid
@@ -1792,8 +1798,14 @@ void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,

   // gather the number of local rows and columns from all processes
   std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
-  MPI_Allgather(&tmp.n_local_rows,1,MPI_INT,proc_n_local_rows.data(),1,MPI_INT,tmp.grid->mpi_communicator);
-  MPI_Allgather(&tmp.n_local_columns,1,MPI_INT,proc_n_local_columns.data(),1,MPI_INT,tmp.grid->mpi_communicator);
+  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
+                           proc_n_local_rows.data(), 1, MPI_INT,
+                           tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
+                       proc_n_local_columns.data(), 1, MPI_INT,
+                       tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);

   const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

@@ -1839,7 +1851,8 @@ void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,

   // before writing the state and property to file wait for
   // all processes to finish writing the matrix content to the file
-  MPI_Barrier(tmp.grid->mpi_communicator);
+  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);

   // only root process will write state and property to the file
   if (tmp.grid->this_mpi_process==0)
@@ -2139,8 +2152,14 @@ void ScaLAPACKMatrix<NumberType>::load_parallel(const char *filename)

   // gather the number of local rows and columns from all processes
   std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
-  MPI_Allgather(&tmp.n_local_rows,1,MPI_INT,proc_n_local_rows.data(),1,MPI_INT,tmp.grid->mpi_communicator);
-  MPI_Allgather(&tmp.n_local_columns,1,MPI_INT,proc_n_local_columns.data(),1,MPI_INT,tmp.grid->mpi_communicator);
+  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
+                           proc_n_local_rows.data(), 1, MPI_INT,
+                           tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);
+  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
+                       proc_n_local_columns.data(), 1, MPI_INT,
+                       tmp.grid->mpi_communicator);
+  AssertThrowMPI(ierr);

   const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc
index 3bc443bbd9..e786eef43d 100644
--- a/source/particles/particle_handler.cc
+++ b/source/particles/particle_handler.cc
@@ -301,7 +301,10 @@ namespace Particles

 #ifdef DEAL_II_WITH_MPI
     types::particle_index particles_to_add_locally = positions.size();
-    MPI_Scan(&particles_to_add_locally, &local_start_index, 1, PARTICLE_INDEX_MPI_TYPE, MPI_SUM, triangulation->get_communicator());
+    const int ierr = MPI_Scan(&particles_to_add_locally, &local_start_index, 1,
+                              PARTICLE_INDEX_MPI_TYPE, MPI_SUM,
+                              triangulation->get_communicator());
+    AssertThrowMPI(ierr);
     local_start_index -= particles_to_add_locally;
 #endif

@@ -777,10 +780,19 @@ namespace Particles
     {
       std::vector<MPI_Request> n_requests(2*n_neighbors);
       for (unsigned int i=0; i<n_neighbors; ++i)
-        MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i], 0, triangulation->get_communicator(), &(n_requests[2*i]));
+        {
+          const int ierr = MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i],
+                                     0, triangulation->get_communicator(), &(n_requests[2*i]));
+          AssertThrowMPI(ierr);
+        }
       for (unsigned int i=0; i<n_neighbors; ++i)
-        MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i], 0, triangulation->get_communicator(), &(n_requests[2*i+1]));
-      MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+        {
+          const int ierr = MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i],
+                                     0, triangulation->get_communicator(), &(n_requests[2*i+1]));
+          AssertThrowMPI(ierr);
+        }
+      const int ierr = MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+      AssertThrowMPI(ierr);
     }

     // Determine how many particles and data we will receive
@@ -803,17 +815,24 @@ namespace Particles
     for (unsigned int i=0; i<n_neighbors; ++i)
       if (n_recv_data[i] > 0)
         {
-          MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR, neighbors[i], 1, triangulation->get_communicator(),&(requests[send_ops]));
+          const int ierr = MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR,
+                                     neighbors[i], 1, triangulation->get_communicator(),
+                                     &(requests[send_ops]));
+          AssertThrowMPI(ierr);
           send_ops++;
         }

     for (unsigned int i=0; i<n_neighbors; ++i)
       if (n_send_data[i] > 0)
         {
-          MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR, neighbors[i], 1, triangulation->get_communicator(),&(requests[send_ops+recv_ops]));
+          const int ierr = MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR,
+                                     neighbors[i], 1, triangulation->get_communicator(),
+                                     &(requests[send_ops+recv_ops]));
+          AssertThrowMPI(ierr);
           recv_ops++;
         }
-    MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+    const int ierr = MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+    AssertThrowMPI(ierr);
   }

   // Put the received particles into the domain if they are in the triangulation
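
For reference, every hunk above applies the same idiom: store the int returned by an MPI routine and pass it to deal.II's AssertThrowMPI macro, which raises an exception when the value is not MPI_SUCCESS. The stand-alone sketch below illustrates that capture-and-check idiom against plain MPI; the CHECK_MPI macro and the small program around it are illustrative stand-ins, not deal.II code. Keep in mind that a non-success code can only be observed when the communicator's error handler is MPI_ERRORS_RETURN; with the default MPI_ERRORS_ARE_FATAL the MPI library aborts before the call returns.

#include <mpi.h>

#include <cstdlib>
#include <iostream>

// Illustrative stand-in for deal.II's AssertThrowMPI: complain and abort if an
// MPI routine reports anything other than MPI_SUCCESS.
#define CHECK_MPI(error_code)                                               \
  do                                                                        \
    {                                                                       \
      if ((error_code) != MPI_SUCCESS)                                      \
        {                                                                   \
          std::cerr << "MPI call failed with error code " << (error_code)   \
                    << " at " << __FILE__ << ':' << __LINE__ << std::endl;  \
          MPI_Abort(MPI_COMM_WORLD, (error_code));                          \
        }                                                                   \
    }                                                                       \
  while (false)

int main(int argc, char **argv)
{
  int ierr = MPI_Init(&argc, &argv);
  CHECK_MPI(ierr);

  // Ask MPI to return error codes instead of aborting so that the checks
  // can actually observe a failure.
  ierr = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
  CHECK_MPI(ierr);

  // The same capture-and-check idiom the patch applies to MPI_Allgather,
  // MPI_Group_free, MPI_Irecv, and friends:
  int rank = 0;
  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  CHECK_MPI(ierr);

  int one = 1, n_ranks = 0;
  ierr = MPI_Allreduce(&one, &n_ranks, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  CHECK_MPI(ierr);

  if (rank == 0)
    std::cout << "communicator has " << n_ranks << " ranks" << std::endl;

  ierr = MPI_Finalize();
  return ierr == MPI_SUCCESS ? EXIT_SUCCESS : EXIT_FAILURE;
}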