&mpi_communicator_inactive_with_root);
AssertThrowMPI(ierr);
- MPI_Group_free(&all_group);
- MPI_Group_free(&inactive_with_root_group);
+ ierr = MPI_Group_free(&all_group);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Group_free(&inactive_with_root_group);
+ AssertThrowMPI(ierr);
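
The pattern introduced throughout this patch is the same everywhere: capture the return code of each MPI call in ierr and hand it to AssertThrowMPI, which throws if the code is not MPI_SUCCESS, instead of silently discarding it. A minimal, self-contained sketch of that pattern, assuming a deal.II build configured with MPI; the group and duplicated communicator are purely illustrative:

#include <deal.II/base/exceptions.h>

#include <mpi.h>

int main(int argc, char **argv)
{
  int ierr = MPI_Init(&argc, &argv);
  AssertThrowMPI(ierr);

  // Create a group and a duplicate communicator, then release both,
  // checking every return code.
  MPI_Group world_group;
  ierr = MPI_Comm_group(MPI_COMM_WORLD, &world_group);
  AssertThrowMPI(ierr);

  MPI_Comm dup_comm;
  ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm);
  AssertThrowMPI(ierr);

  ierr = MPI_Comm_free(&dup_comm);
  AssertThrowMPI(ierr);
  ierr = MPI_Group_free(&world_group);
  AssertThrowMPI(ierr);

  ierr = MPI_Finalize();
  AssertThrowMPI(ierr);
}
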
  // Double-check that the process with rank 0 in the subgroup is active:
#ifdef DEBUG
std::vector<int> size_all_data(n_procs);
// Exchanging the number of bboxes
- MPI_Allgather(&n_local_data, 1, MPI_INT,
- &(size_all_data[0]), 1, MPI_INT,
- mpi_communicator);
+ int ierr = MPI_Allgather(&n_local_data, 1, MPI_INT,
+ &(size_all_data[0]), 1, MPI_INT,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
     // Now computing the displacement, relative to recvbuf,
// at which to store the incoming data
// Allocating a vector to contain all the received data
std::vector<double> data_array(rdispls.back() + size_all_data.back());
- MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
- &(data_array[0]), &(size_all_data[0]),
- &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+ ierr = MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
+ &(data_array[0]), &(size_all_data[0]),
+ &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+ AssertThrowMPI(ierr);
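
The hunk above is an instance of the usual variable-size gather: MPI_Allgather first exchanges the per-rank element counts, the receive displacements are the exclusive prefix sum of those counts, and MPI_Allgatherv then collects the payload itself. A stand-alone sketch of that sequence; the check() helper is a local stand-in for AssertThrowMPI, and all names are illustrative rather than taken from the patch:

#include <mpi.h>

#include <numeric>
#include <vector>

// Local stand-in for AssertThrowMPI: abort on any non-success return code.
static void check(const int ierr)
{
  if (ierr != MPI_SUCCESS)
    MPI_Abort(MPI_COMM_WORLD, ierr);
}

int main(int argc, char **argv)
{
  check(MPI_Init(&argc, &argv));

  int rank = 0, n_procs = 1;
  check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
  check(MPI_Comm_size(MPI_COMM_WORLD, &n_procs));

  // Each rank contributes a different number of doubles.
  std::vector<double> local_data(rank + 1, static_cast<double>(rank));
  int n_local = static_cast<int>(local_data.size());

  // Step 1: exchange the per-rank counts.
  std::vector<int> counts(n_procs);
  check(MPI_Allgather(&n_local, 1, MPI_INT,
                      counts.data(), 1, MPI_INT, MPI_COMM_WORLD));

  // Step 2: receive displacements = exclusive prefix sum of the counts.
  std::vector<int> displs(n_procs, 0);
  std::partial_sum(counts.begin(), counts.end() - 1, displs.begin() + 1);

  // Step 3: gather the variable-length payload from every rank.
  std::vector<double> all_data(displs.back() + counts.back());
  check(MPI_Allgatherv(local_data.data(), n_local, MPI_DOUBLE,
                       all_data.data(), counts.data(), displs.data(),
                       MPI_DOUBLE, MPI_COMM_WORLD));

  check(MPI_Finalize());
  return 0;
}

The displacement of rank i is the sum of the counts of ranks 0..i-1, which is exactly what the partial_sum over the first n_procs-1 counts produces, and the total receive buffer size is the last displacement plus the last count.
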
// Step 4: create the array of bboxes for output
std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
Cblacs_gridexit(union_blacs_context);
if (mpi_communicator_union != MPI_COMM_NULL)
- MPI_Comm_free(&mpi_communicator_union);
- MPI_Group_free(&group_source);
- MPI_Group_free(&group_dest);
- MPI_Group_free(&group_union);
+ {
+ ierr = MPI_Comm_free(&mpi_communicator_union);
+ AssertThrowMPI(ierr);
+ }
+ ierr = MPI_Group_free(&group_source);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Group_free(&group_dest);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Group_free(&group_union);
+ AssertThrowMPI(ierr);
}
else
     // process is active in the process grid
// gather the number of local rows and columns from all processes
std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
- MPI_Allgather(&tmp.n_local_rows,1,MPI_INT,proc_n_local_rows.data(),1,MPI_INT,tmp.grid->mpi_communicator);
- MPI_Allgather(&tmp.n_local_columns,1,MPI_INT,proc_n_local_columns.data(),1,MPI_INT,tmp.grid->mpi_communicator);
+ int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
+ proc_n_local_rows.data(), 1, MPI_INT,
+ tmp.grid->mpi_communicator);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
+ proc_n_local_columns.data(), 1, MPI_INT,
+ tmp.grid->mpi_communicator);
+ AssertThrowMPI(ierr);
const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
    // before writing the state and property to file, wait for
// all processes to finish writing the matrix content to the file
- MPI_Barrier(tmp.grid->mpi_communicator);
+ ierr = MPI_Barrier(tmp.grid->mpi_communicator);
+ AssertThrowMPI(ierr);
    // only the root process will write the state and property to the file
if (tmp.grid->this_mpi_process==0)
// gather the number of local rows and columns from all processes
std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
- MPI_Allgather(&tmp.n_local_rows,1,MPI_INT,proc_n_local_rows.data(),1,MPI_INT,tmp.grid->mpi_communicator);
- MPI_Allgather(&tmp.n_local_columns,1,MPI_INT,proc_n_local_columns.data(),1,MPI_INT,tmp.grid->mpi_communicator);
+ int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
+ proc_n_local_rows.data(), 1, MPI_INT,
+ tmp.grid->mpi_communicator);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
+ proc_n_local_columns.data(), 1, MPI_INT,
+ tmp.grid->mpi_communicator);
+ AssertThrowMPI(ierr);
const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
#ifdef DEAL_II_WITH_MPI
types::particle_index particles_to_add_locally = positions.size();
- MPI_Scan(&particles_to_add_locally, &local_start_index, 1, PARTICLE_INDEX_MPI_TYPE, MPI_SUM, triangulation->get_communicator());
+ const int ierr = MPI_Scan(&particles_to_add_locally, &local_start_index, 1,
+ PARTICLE_INDEX_MPI_TYPE, MPI_SUM,
+ triangulation->get_communicator());
+ AssertThrowMPI(ierr);
local_start_index -= particles_to_add_locally;
#endif
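
MPI_Scan with MPI_SUM returns on each rank the inclusive prefix sum of the contributed counts; subtracting the local count afterwards turns it into the exclusive prefix, i.e. the first global index owned by this rank, which is how the patch computes local_start_index above. A sketch under the assumption of plain MPI, with unsigned long long standing in for the particle index type and check() standing in for AssertThrowMPI:

#include <mpi.h>

static void check(const int ierr)
{
  if (ierr != MPI_SUCCESS)
    MPI_Abort(MPI_COMM_WORLD, ierr);
}

int main(int argc, char **argv)
{
  check(MPI_Init(&argc, &argv));

  int rank = 0;
  check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));

  // Pretend every rank adds (rank + 1) new items.
  unsigned long long n_local = rank + 1;

  // Inclusive prefix sum over the ranks ...
  unsigned long long local_start = 0;
  check(MPI_Scan(&n_local, &local_start, 1, MPI_UNSIGNED_LONG_LONG,
                 MPI_SUM, MPI_COMM_WORLD));

  // ... minus the local contribution gives the first index this rank owns.
  local_start -= n_local;

  check(MPI_Finalize());
  return 0;
}
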
{
std::vector<MPI_Request> n_requests(2*n_neighbors);
for (unsigned int i=0; i<n_neighbors; ++i)
- MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i], 0, triangulation->get_communicator(), &(n_requests[2*i]));
+ {
+ const int ierr = MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i],
+ 0, triangulation->get_communicator(), &(n_requests[2*i]));
+ AssertThrowMPI(ierr);
+ }
for (unsigned int i=0; i<n_neighbors; ++i)
- MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i], 0, triangulation->get_communicator(), &(n_requests[2*i+1]));
- MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i],
+ 0, triangulation->get_communicator(), &(n_requests[2*i+1]));
+ AssertThrowMPI(ierr);
+ }
+ const int ierr = MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
}
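
The block above posts one MPI_Irecv and one MPI_Isend per neighbor and then waits on all requests at once, so the per-neighbor message sizes are exchanged without any ordering constraints. A compact, self-contained sketch of the same pattern; the ring-shaped neighbor list and the check() helper are assumptions made only to keep the example runnable:

#include <mpi.h>

#include <vector>

static void check(const int ierr)
{
  if (ierr != MPI_SUCCESS)
    MPI_Abort(MPI_COMM_WORLD, ierr);
}

int main(int argc, char **argv)
{
  check(MPI_Init(&argc, &argv));

  int rank = 0, n_procs = 1;
  check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
  check(MPI_Comm_size(MPI_COMM_WORLD, &n_procs));

  // Illustrative neighborhood: the two ring neighbors of this rank.
  const std::vector<int> neighbors = {(rank + 1) % n_procs,
                                      (rank + n_procs - 1) % n_procs};
  const unsigned int n_neighbors = neighbors.size();

  std::vector<int> n_send(n_neighbors, rank), n_recv(n_neighbors, 0);
  std::vector<MPI_Request> requests(2 * n_neighbors);

  // Post all receives first, then all sends, then wait for everything.
  for (unsigned int i = 0; i < n_neighbors; ++i)
    check(MPI_Irecv(&n_recv[i], 1, MPI_INT, neighbors[i], 0,
                    MPI_COMM_WORLD, &requests[2 * i]));
  for (unsigned int i = 0; i < n_neighbors; ++i)
    check(MPI_Isend(&n_send[i], 1, MPI_INT, neighbors[i], 0,
                    MPI_COMM_WORLD, &requests[2 * i + 1]));
  check(MPI_Waitall(2 * n_neighbors, requests.data(), MPI_STATUSES_IGNORE));

  check(MPI_Finalize());
  return 0;
}

Posting the receives before the sends lets incoming messages land directly in the user buffers instead of the library's unexpected-message storage.
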
   // Determine how many particles and how much data we will receive
for (unsigned int i=0; i<n_neighbors; ++i)
if (n_recv_data[i] > 0)
{
- MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR, neighbors[i], 1, triangulation->get_communicator(),&(requests[send_ops]));
+ const int ierr = MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR,
+ neighbors[i], 1, triangulation->get_communicator(),
+ &(requests[send_ops]));
+ AssertThrowMPI(ierr);
send_ops++;
}
for (unsigned int i=0; i<n_neighbors; ++i)
if (n_send_data[i] > 0)
{
- MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR, neighbors[i], 1, triangulation->get_communicator(),&(requests[send_ops+recv_ops]));
+ const int ierr = MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR,
+ neighbors[i], 1, triangulation->get_communicator(),
+ &(requests[send_ops+recv_ops]));
+ AssertThrowMPI(ierr);
recv_ops++;
}
- MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
}
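
Once the counts are known, the payload exchange above posts requests only for neighbors with nonzero traffic and waits on exactly the requests it started. A sketch of that second phase, again with an assumed ring of neighbors, fixed example counts instead of the previously exchanged ones, and check() standing in for AssertThrowMPI:

#include <mpi.h>

#include <vector>

static void check(const int ierr)
{
  if (ierr != MPI_SUCCESS)
    MPI_Abort(MPI_COMM_WORLD, ierr);
}

int main(int argc, char **argv)
{
  check(MPI_Init(&argc, &argv));

  int rank = 0, n_procs = 1;
  check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
  check(MPI_Comm_size(MPI_COMM_WORLD, &n_procs));

  // Two ring neighbors; in the real code the counts come from the size
  // exchange shown earlier, here they are fixed example values.
  const std::vector<int> neighbors = {(rank + 1) % n_procs,
                                      (rank + n_procs - 1) % n_procs};
  const unsigned int n_neighbors = neighbors.size();
  std::vector<int> n_send(n_neighbors, 8), n_recv(n_neighbors, 8);

  // Offsets into the flat send/receive buffers (exclusive prefix sums).
  std::vector<int> send_offsets(n_neighbors, 0), recv_offsets(n_neighbors, 0);
  for (unsigned int i = 1; i < n_neighbors; ++i)
    {
      send_offsets[i] = send_offsets[i - 1] + n_send[i - 1];
      recv_offsets[i] = recv_offsets[i - 1] + n_recv[i - 1];
    }

  std::vector<char> send_data(send_offsets.back() + n_send.back(), 'x');
  std::vector<char> recv_data(recv_offsets.back() + n_recv.back());

  // Post requests only for neighbors that actually exchange data, then
  // wait on exactly the requests that were started.
  std::vector<MPI_Request> requests(2 * n_neighbors);
  unsigned int n_requests = 0;
  for (unsigned int i = 0; i < n_neighbors; ++i)
    if (n_recv[i] > 0)
      check(MPI_Irecv(&recv_data[recv_offsets[i]], n_recv[i], MPI_CHAR,
                      neighbors[i], 1, MPI_COMM_WORLD,
                      &requests[n_requests++]));
  for (unsigned int i = 0; i < n_neighbors; ++i)
    if (n_send[i] > 0)
      check(MPI_Isend(&send_data[send_offsets[i]], n_send[i], MPI_CHAR,
                      neighbors[i], 1, MPI_COMM_WORLD,
                      &requests[n_requests++]));
  check(MPI_Waitall(n_requests, requests.data(), MPI_STATUSES_IGNORE));

  check(MPI_Finalize());
  return 0;
}
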
// Put the received particles into the domain if they are in the triangulation