<h3>General</h3>
<ol>
+<li> Improved: The error codes returned by all MPI functions are now checked;
+ if an MPI call fails for any reason, an exception with a helpful message is
+ thrown (see the short sketch following these changelog entries).
+ <br>
+ (David Wells, 2016/11/09)
+</li>
+
<li>
Fixed: We have run the PVS static analysis checker on the entire code base,
to see what possible problems it uncovers (see
<br>
(Rajat Arora, 2016/10/29)
</li>
-
+
<li> New: Add MatrixFreeOperators::MassOperator representing a mass matrix.
<br>
(Daniel Arndt, 2016/10/27)
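The following is a minimal sketch (not part of the patch itself) of the error-checking
pattern that the "Improved" entry above refers to and that the hunks below apply
throughout the library: the return code of every MPI call is captured and handed to
AssertThrowMPI(), which turns a failure into an exception with a helpful message. The
helper function checked_barrier() is made up for illustration, and AssertThrowMPI is
assumed to be available through deal.II's exception header.

#include <deal.II/base/exceptions.h>
#include <mpi.h>

// Illustration only: wrap an MPI call, capture its return code, and convert
// any failure into a descriptive exception instead of silently ignoring it.
void checked_barrier (const MPI_Comm &communicator)
{
  const int ierr = MPI_Barrier (communicator);
  AssertThrowMPI (ierr);
}

The diff hunks below apply exactly this pattern to the existing MPI calls.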
#ifdef DEAL_II_WITH_MPI
if (job_supports_mpi())
{
- MPI_Allreduce (values != output
- ?
- // TODO This const_cast is only needed for older
- // (e.g., openMPI 1.6, released in 2012)
- // implementations of MPI-2. It is not needed as of
- // MPI-3 and we should remove it at some point in
- // the future.
- const_cast<void *>(static_cast<const void *>(values))
- :
- MPI_IN_PLACE,
- static_cast<void *>(output),
- static_cast<int>(size),
- internal::mpi_type_id(values),
- mpi_op,
- mpi_communicator);
+ const int ierr = MPI_Allreduce
+ (values != output
+ ?
+ // TODO This const_cast is only needed for older
+ // (e.g., openMPI 1.6, released in 2012)
+ // implementations of MPI-2. It is not needed as
+ // of MPI-3 and we should remove it at some
+ // point in the future.
+ const_cast<void *>(static_cast<const void *>(values))
+ :
+ MPI_IN_PLACE,
+ static_cast<void *>(output),
+ static_cast<int>(size),
+ internal::mpi_type_id(values),
+ mpi_op,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
}
else
#endif
if (job_supports_mpi())
{
T dummy_selector;
- MPI_Allreduce (values != output
- ?
- // TODO This const_cast is only needed for older
- // (e.g., openMPI 1.6, released in 2012)
- // implementations of MPI-2. It is not needed as of
- // MPI-3 and we should remove it at some point in
- // the future.
- const_cast<void *>(static_cast<const void *>(values))
- :
- MPI_IN_PLACE,
- static_cast<void *>(output),
- static_cast<int>(size*2),
- internal::mpi_type_id(&dummy_selector),
- mpi_op,
- mpi_communicator);
+ const int ierr = MPI_Allreduce
+ (values != output
+ ?
+ // TODO This const_cast is only needed for older
+ // (e.g., openMPI 1.6, released in 2012)
+ // implementations of MPI-2. It is not needed as
+ // of MPI-3 and we should remove it at some
+ // point in the future.
+ const_cast<void *>(static_cast<const void *>(values))
+ :
+ MPI_IN_PLACE,
+ static_cast<void *>(output),
+ static_cast<int>(size*2),
+ internal::mpi_type_id(&dummy_selector),
+ mpi_op,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
}
else
#endif
{
#ifdef DEAL_II_WITH_MPI
for (size_type j=0; j<compress_requests.size(); j++)
- MPI_Request_free(&compress_requests[j]);
+ {
+ const int ierr = MPI_Request_free(&compress_requests[j]);
+ AssertThrowMPI(ierr);
+ }
compress_requests.clear();
for (size_type j=0; j<update_ghost_values_requests.size(); j++)
- MPI_Request_free(&update_ghost_values_requests[j]);
+ {
+ const int ierr = MPI_Request_free(&update_ghost_values_requests[j]);
+ AssertThrowMPI(ierr);
+ }
update_ghost_values_requests.clear();
#endif
}
ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
"The number of ghost entries times the size of 'Number' "
"exceeds this value. This is not supported."));
- MPI_Recv_init (&import_data[current_index_start],
- part.import_targets()[i].second*sizeof(Number),
- MPI_BYTE,
- part.import_targets()[i].first,
- part.import_targets()[i].first +
- part.n_mpi_processes()*channel,
- part.get_communicator(),
- &compress_requests[i]);
+ const int ierr = MPI_Recv_init (&import_data[current_index_start],
+ part.import_targets()[i].second*sizeof(Number),
+ MPI_BYTE,
+ part.import_targets()[i].first,
+ part.import_targets()[i].first +
+ part.n_mpi_processes()*channel,
+ part.get_communicator(),
+ &compress_requests[i]);
+ AssertThrowMPI (ierr);
current_index_start += part.import_targets()[i].second;
}
AssertDimension(current_index_start, part.n_import_indices());
ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
"The number of ghost entries times the size of 'Number' "
"exceeds this value. This is not supported."));
- MPI_Send_init (&this->val[current_index_start],
- part.ghost_targets()[i].second*sizeof(Number),
- MPI_BYTE,
- part.ghost_targets()[i].first,
- part.this_mpi_process() +
- part.n_mpi_processes()*channel,
- part.get_communicator(),
- &compress_requests[n_import_targets+i]);
+ const int ierr = MPI_Send_init (&this->val[current_index_start],
+ part.ghost_targets()[i].second*sizeof(Number),
+ MPI_BYTE,
+ part.ghost_targets()[i].first,
+ part.this_mpi_process() +
+ part.n_mpi_processes()*channel,
+ part.get_communicator(),
+ &compress_requests[n_import_targets+i]);
+ AssertThrowMPI (ierr);
current_index_start += part.ghost_targets()[i].second;
}
AssertDimension (current_index_start,
compress_requests.size());
if (compress_requests.size() > 0)
{
- int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]);
- (void)ierr;
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]);
+ AssertThrowMPI(ierr);
}
#endif
}
// first wait for the receive to complete
if (compress_requests.size() > 0 && n_import_targets > 0)
{
- int ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
- MPI_STATUSES_IGNORE);
- (void)ierr;
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
Number *read_position = import_data;
std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
if (compress_requests.size() > 0 && n_ghost_targets > 0)
{
- int ierr = MPI_Waitall (n_ghost_targets,
- &compress_requests[n_import_targets],
- MPI_STATUSES_IGNORE);
- (void)ierr;
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Waitall (n_ghost_targets,
+ &compress_requests[n_import_targets],
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
}
else
AssertDimension (part.n_ghost_indices(), 0);
{
// allow writing into ghost indices even though we are in a
// const function
- MPI_Recv_init (const_cast<Number *>(&val[current_index_start]),
- part.ghost_targets()[i].second*sizeof(Number),
- MPI_BYTE,
- part.ghost_targets()[i].first,
- part.ghost_targets()[i].first +
- counter*part.n_mpi_processes(),
- part.get_communicator(),
- &update_ghost_values_requests[i]);
+ const int ierr = MPI_Recv_init (const_cast<Number *>(&val[current_index_start]),
+ part.ghost_targets()[i].second*sizeof(Number),
+ MPI_BYTE,
+ part.ghost_targets()[i].first,
+ part.ghost_targets()[i].first +
+ counter*part.n_mpi_processes(),
+ part.get_communicator(),
+ &update_ghost_values_requests[i]);
+ AssertThrowMPI (ierr);
current_index_start += part.ghost_targets()[i].second;
}
AssertDimension (current_index_start,
current_index_start = 0;
for (unsigned int i=0; i<n_import_targets; i++)
{
- MPI_Send_init (&import_data[current_index_start],
- part.import_targets()[i].second*sizeof(Number),
- MPI_BYTE, part.import_targets()[i].first,
- part.this_mpi_process() +
- part.n_mpi_processes()*counter,
- part.get_communicator(),
- &update_ghost_values_requests[n_ghost_targets+i]);
+ const int ierr = MPI_Send_init (&import_data[current_index_start],
+ part.import_targets()[i].second*sizeof(Number),
+ MPI_BYTE, part.import_targets()[i].first,
+ part.this_mpi_process() +
+ part.n_mpi_processes()*counter,
+ part.get_communicator(),
+ &update_ghost_values_requests[n_ghost_targets+i]);
+ AssertThrowMPI (ierr);
current_index_start += part.import_targets()[i].second;
}
AssertDimension (current_index_start, part.n_import_indices());
update_ghost_values_requests.size());
if (update_ghost_values_requests.size() > 0)
{
- int ierr = MPI_Startall(update_ghost_values_requests.size(),
- &update_ghost_values_requests[0]);
- (void)ierr;
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Startall(update_ghost_values_requests.size(),
+ &update_ghost_values_requests[0]);
+ AssertThrowMPI(ierr);
}
#else
(void)counter;
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- int ierr = MPI_Waitall (update_ghost_values_requests.size(),
- &update_ghost_values_requests[0],
- MPI_STATUSES_IGNORE);
- (void)ierr;
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Waitall (update_ghost_values_requests.size(),
+ &update_ghost_values_requests[0],
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI (ierr);
}
#endif
vector_is_ghosted = true;
int flag = 1;
if (update_ghost_values_requests.size()>0)
{
- int ierr = MPI_Testall (update_ghost_values_requests.size(),
- &update_ghost_values_requests[0],
- &flag, MPI_STATUSES_IGNORE);
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Testall (update_ghost_values_requests.size(),
+ &update_ghost_values_requests[0],
+ &flag, MPI_STATUSES_IGNORE);
+ AssertThrowMPI (ierr);
Assert (flag == 1,
ExcMessage("MPI found unfinished update_ghost_values() requests"
"when calling swap, which is not allowed"));
}
if (compress_requests.size()>0)
{
- int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0],
- &flag, MPI_STATUSES_IGNORE);
- Assert (ierr == MPI_SUCCESS, ExcInternalError());
+ const int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0],
+ &flag, MPI_STATUSES_IGNORE);
+ AssertThrowMPI (ierr);
Assert (flag == 1,
ExcMessage("MPI found unfinished compress() requests "
"when calling swap, which is not allowed"));
#ifdef DEAL_II_WITH_MPI
if (partitioner->n_mpi_processes() > 1)
for (unsigned int i=0; i<partitioner->this_mpi_process(); i++)
- MPI_Barrier (partitioner->get_communicator());
+ {
+ const int ierr = MPI_Barrier (partitioner->get_communicator());
+ AssertThrowMPI (ierr);
+ }
#endif
out << "Process #" << partitioner->this_mpi_process() << std::endl
#ifdef DEAL_II_WITH_MPI
if (partitioner->n_mpi_processes() > 1)
{
- MPI_Barrier (partitioner->get_communicator());
+ int ierr = MPI_Barrier (partitioner->get_communicator());
+ AssertThrowMPI (ierr);
for (unsigned int i=partitioner->this_mpi_process()+1;
i<partitioner->n_mpi_processes(); i++)
- MPI_Barrier (partitioner->get_communicator());
+ {
+ ierr = MPI_Barrier (partitioner->get_communicator());
+ AssertThrowMPI (ierr);
+ }
}
#endif
// disable the check here only if no processor has any such data
#ifdef DEAL_II_WITH_MPI
unsigned int general_size_glob = 0, general_size_loc = jacobians.size();
- MPI_Allreduce (&general_size_loc, &general_size_glob, 1, MPI_UNSIGNED,
- MPI_MAX, size_info.communicator);
+ int ierr = MPI_Allreduce (&general_size_loc, &general_size_glob, 1,
+ MPI_UNSIGNED, MPI_MAX, size_info.communicator);
+ AssertThrowMPI (ierr);
#else
unsigned int general_size_glob = jacobians.size();
#endif
#ifdef DEAL_II_WITH_MPI
unsigned int quad_size_glob = 0, quad_size_loc = quadrature_points.size();
- MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED,
- MPI_MAX, size_info.communicator);
+ ierr = MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED,
+ MPI_MAX, size_info.communicator);
+ AssertThrowMPI (ierr);
#else
unsigned int quad_size_glob = quadrature_points.size();
#endif
if (Utilities::MPI::job_supports_mpi())
{
int communicators_same = 0;
- MPI_Comm_compare (dist_tria->get_communicator(), comm_mf,
- &communicators_same);
+ const int ierr = MPI_Comm_compare (dist_tria->get_communicator(), comm_mf,
+ &communicators_same);
+ AssertThrowMPI (ierr);
Assert (communicators_same == MPI_IDENT ||
communicators_same == MPI_CONGRUENT,
ExcMessage ("MPI communicator in parallel::distributed::Triangulation "
reinit_vector(mg_dof_handler, component_to_block_map, dst);
#ifdef DEBUG_OUTPUT
std::cout << "copy_to_mg src " << src.l2_norm() << std::endl;
- MPI_Barrier(MPI_COMM_WORLD);
+ int ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
#endif
if (perform_plain_copy)
{
--level;
#ifdef DEBUG_OUTPUT
- MPI_Barrier(MPI_COMM_WORLD);
+ ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
#endif
typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
dst_level.compress(VectorOperation::insert);
#ifdef DEBUG_OUTPUT
- MPI_Barrier(MPI_COMM_WORLD);
+ ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
std::cout << "copy_to_mg dst " << level << " " << dst_level.l2_norm() << std::endl;
#endif
}
for (unsigned int level=src.min_level(); level<=src.max_level(); ++level)
{
#ifdef DEBUG_OUTPUT
- MPI_Barrier(MPI_COMM_WORLD);
+ int ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
std::cout << "copy_from_mg src " << level << " " << src[level].l2_norm() << std::endl;
- MPI_Barrier(MPI_COMM_WORLD);
+ ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
#endif
typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
#ifdef DEBUG_OUTPUT
{
dst.compress(VectorOperation::insert);
- MPI_Barrier(MPI_COMM_WORLD);
+ ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
std::cout << "copy_from_mg level=" << level << " " << dst.l2_norm() << std::endl;
}
#endif
}
dst.compress(VectorOperation::insert);
#ifdef DEBUG_OUTPUT
- MPI_Barrier(MPI_COMM_WORLD);
+ const int ierr = MPI_Barrier(MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
std::cout << "copy_from_mg " << dst.l2_norm() << std::endl;
#endif
}
double my_values[3] = { mean_double.real(), mean_double.imag(), area };
double global_values[3];
- MPI_Allreduce (my_values, global_values, 3, MPI_DOUBLE,
- MPI_SUM,
- p_triangulation->get_communicator());
+ const int ierr = MPI_Allreduce (my_values, global_values, 3, MPI_DOUBLE,
+ MPI_SUM,
+ p_triangulation->get_communicator());
+ AssertThrowMPI (ierr);
set_possibly_complex_number(global_values[0], global_values[1],
mean);
write_vtu (f);
#else
- int myrank, nproc, err;
- MPI_Comm_rank(comm, &myrank);
- MPI_Comm_size(comm, &nproc);
+ int myrank, nproc;
+ int ierr = MPI_Comm_rank(comm, &myrank);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Comm_size(comm, &nproc);
+ AssertThrowMPI(ierr);
MPI_Info info;
- MPI_Info_create(&info);
+ ierr = MPI_Info_create(&info);
+ AssertThrowMPI(ierr);
MPI_File fh;
- err = MPI_File_open(comm, const_cast<char *>(filename),
- MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
- AssertThrow(err==0,
- ExcMessage("Unable to open file <"
- + std::string(filename) +
- "> with MPI_File_open. The error code "
- "returned was "
- + Utilities::to_string(err) + "."));
+ ierr = MPI_File_open(comm, const_cast<char *>(filename),
+ MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
+ AssertThrowMPI(ierr);
-
- MPI_File_set_size(fh, 0); // delete the file contents
+ ierr = MPI_File_set_size(fh, 0); // delete the file contents
+ AssertThrowMPI(ierr);
// this barrier is necessary, because otherwise others might already
// write while one core is still setting the size to zero.
- MPI_Barrier(comm);
- MPI_Info_free(&info);
+ ierr = MPI_Barrier(comm);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
unsigned int header_size;
std::stringstream ss;
DataOutBase::write_vtu_header(ss, vtk_flags);
header_size = ss.str().size();
- MPI_File_write(fh, const_cast<char *>(ss.str().c_str()), header_size, MPI_CHAR, MPI_STATUS_IGNORE);
+ ierr = MPI_File_write(fh, const_cast<char *>(ss.str().c_str()), header_size,
+ MPI_CHAR, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
}
- MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm);
+ ierr = MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm);
+ AssertThrowMPI(ierr);
- MPI_File_seek_shared( fh, header_size, MPI_SEEK_SET );
+ ierr = MPI_File_seek_shared( fh, header_size, MPI_SEEK_SET );
+ AssertThrowMPI(ierr);
{
std::stringstream ss;
DataOutBase::write_vtu_main (get_patches(), get_dataset_names(),
get_vector_data_ranges(),
vtk_flags, ss);
- MPI_File_write_ordered(fh, const_cast<char *>(ss.str().c_str()), ss.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
+ ierr = MPI_File_write_ordered(fh, const_cast<char *>(ss.str().c_str()),
+ ss.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
}
//write footer
std::stringstream ss;
DataOutBase::write_vtu_footer(ss);
unsigned int footer_size = ss.str().size();
- MPI_File_write_shared(fh, const_cast<char *>(ss.str().c_str()), footer_size, MPI_CHAR, MPI_STATUS_IGNORE);
+ ierr = MPI_File_write_shared(fh, const_cast<char *>(ss.str().c_str()),
+ footer_size, MPI_CHAR, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
}
- MPI_File_close( &fh );
+ ierr = MPI_File_close( &fh );
+ AssertThrowMPI(ierr);
#endif
}
// And compute the global total
#ifdef DEAL_II_WITH_MPI
- MPI_Comm_rank(comm, &myrank);
- MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
+ int ierr = MPI_Comm_rank(comm, &myrank);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
+ AssertThrowMPI(ierr);
#else
myrank = 0;
global_node_cell_count[0] = local_node_cell_count[0];
int myrank;
#ifdef DEAL_II_WITH_MPI
- MPI_Comm_rank(comm, &myrank);
+ const int ierr = MPI_Comm_rank(comm, &myrank);
+ AssertThrowMPI(ierr);
#else
(void)comm;
myrank = 0;
const std::string &solution_filename,
MPI_Comm comm)
{
+ int ierr;
+ (void)ierr;
#ifndef DEAL_II_WITH_HDF5
// throw an exception, but first make
// sure the compiler does not warn about
#ifndef H5_HAVE_PARALLEL
# ifdef DEAL_II_WITH_MPI
int world_size;
- MPI_Comm_size(comm, &world_size);
+ ierr = MPI_Comm_size(comm, &world_size);
+ AssertThrowMPI(ierr);
AssertThrow (world_size <= 1,
ExcMessage ("Serial HDF5 output on multiple processes is not yet supported."));
# endif
// Compute the global total number of nodes/cells
// And determine the offset of the data for this process
#ifdef DEAL_II_WITH_MPI
- MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
- MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm);
+ ierr = MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm);
+ AssertThrowMPI(ierr);
global_node_cell_offsets[0] -= local_node_cell_count[0];
global_node_cell_offsets[1] -= local_node_cell_count[1];
#else
const unsigned int gather_size = (my_rank==0)?n_ranks:1;
std::vector<types::global_dof_index> global_dofs(gather_size);
- MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
- &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
- communicator);
+ int ierr = MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
+ communicator);
+ AssertThrowMPI(ierr);
+
if (my_rank == 0)
{
// find out if the received std::vector is ascending
// now broadcast the result
int is_ascending = is_globally_ascending ? 1 : 0;
- MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
+ ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
+ AssertThrowMPI(ierr);
return (is_ascending==1);
#else
unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
{
int n_jobs=1;
- (void) MPI_Comm_size (mpi_communicator, &n_jobs);
+ const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs);
+ AssertThrowMPI(ierr);
return n_jobs;
}
unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
{
int rank=0;
- (void) MPI_Comm_rank (mpi_communicator, &rank);
+ const int ierr = MPI_Comm_rank (mpi_communicator, &rank);
+ AssertThrowMPI(ierr);
return rank;
}
MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
{
MPI_Comm new_communicator;
- MPI_Comm_dup (mpi_communicator, &new_communicator);
+ const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator);
+ AssertThrowMPI(ierr);
return new_communicator;
}
// processors in this case, which is more expensive than the reduction
// operation above in MPI_Allreduce)
std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
- MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
- &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
- mpi_comm);
+ const int ierr = MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ mpi_comm);
+ AssertThrowMPI(ierr);
// now we know who is going to communicate with whom. collect who is
// going to communicate with us!
MPI_Op op;
int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ AssertThrowMPI(ierr);
MinMaxAvg in;
in.sum = in.min = in.max = my_value;
MPI_Datatype types[]= {MPI_DOUBLE, MPI_INT};
ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ AssertThrowMPI(ierr);
ierr = MPI_Type_commit(&type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ AssertThrowMPI(ierr);
ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ AssertThrowMPI(ierr);
ierr = MPI_Type_free (&type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ AssertThrowMPI(ierr);
ierr = MPI_Op_free(&op);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ AssertThrowMPI(ierr);
result.avg = result.sum / numproc;
// if we have PETSc, we will initialize it and let it handle MPI.
// Otherwise, we will do it.
int MPI_has_been_started = 0;
- MPI_Initialized(&MPI_has_been_started);
+ int ierr = MPI_Initialized(&MPI_has_been_started);
+ AssertThrowMPI(ierr);
AssertThrow (MPI_has_been_started == 0,
ExcMessage ("MPI error. You can only start MPI once!"));
- int mpi_err, provided;
- // this works like mpi_err = MPI_Init (&argc, &argv); but tells MPI that
+ int provided;
+ // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
// we might use several threads but never call two MPI functions at the
// same time. For an explanation of why we do this, see
// http://www.open-mpi.org/community/lists/users/2010/03/12244.php
int wanted = MPI_THREAD_SERIALIZED;
- mpi_err = MPI_Init_thread(&argc, &argv, wanted, &provided);
- AssertThrow (mpi_err == 0,
- ExcMessage ("MPI could not be initialized."));
+ ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
+ AssertThrowMPI(ierr);
// disable for now because at least some implementations always return
// MPI_THREAD_SINGLE.
std::vector<char> all_hostnames(max_hostname_size *
MPI::n_mpi_processes(MPI_COMM_WORLD));
- MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
- &all_hostnames[0], max_hostname_size, MPI_CHAR,
- MPI_COMM_WORLD);
+ const int ierr = MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
+ &all_hostnames[0], max_hostname_size, MPI_CHAR,
+ MPI_COMM_WORLD);
+ AssertThrowMPI(ierr);
// count how often our own hostname appears and which instance of it
// the current process represents
}
else
{
- const int mpi_err = MPI_Finalize();
- AssertThrow (mpi_err == 0,
- ExcMessage ("An error occurred while calling MPI_Finalize()"));
+ const int ierr = MPI_Finalize();
+ AssertThrowMPI(ierr);
}
}
#endif
{
#ifdef DEAL_II_WITH_MPI
int MPI_has_been_started = 0;
- MPI_Initialized(&MPI_has_been_started);
+ const int ierr = MPI_Initialized(&MPI_has_been_started);
+ AssertThrowMPI(ierr);
return (MPI_has_been_started > 0);
#else
// Allow non-zero start index for the vector. send this data to all
// processors
first_index[0] = local_range_data.first;
- MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE,
- 0, communicator);
+ int ierr = MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ 0, communicator);
+ AssertThrowMPI(ierr);
// Get the end-of-local_range for all processors
- MPI_Allgather(&local_range_data.second, 1,
- DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1,
- DEAL_II_DOF_INDEX_MPI_TYPE, communicator);
+ ierr = MPI_Allgather(&local_range_data.second, 1,
+ DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1,
+ DEAL_II_DOF_INDEX_MPI_TYPE, communicator);
+ AssertThrowMPI(ierr);
first_index[n_procs] = global_size;
// fix case when there are some processors without any locally owned
for (unsigned int i=0; i<n_ghost_targets; i++)
send_buffer[ghost_targets_data[i].first] = ghost_targets_data[i].second;
- MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
- MPI_INT, communicator);
+ const int ierr = MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
+ MPI_INT, communicator);
+ AssertThrowMPI(ierr);
// allocate memory for import data
std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
std::vector<MPI_Request> import_requests (import_targets_data.size());
for (unsigned int i=0; i<import_targets_data.size(); i++)
{
- MPI_Irecv (&expanded_import_indices[current_index_start],
- import_targets_data[i].second,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- import_targets_data[i].first, import_targets_data[i].first,
- communicator, &import_requests[i]);
+ const int ierr = MPI_Irecv (&expanded_import_indices[current_index_start],
+ import_targets_data[i].second,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ import_targets_data[i].first,
+ import_targets_data[i].first,
+ communicator, &import_requests[i]);
+ AssertThrowMPI(ierr);
current_index_start += import_targets_data[i].second;
}
AssertDimension (current_index_start, n_import_indices_data);
current_index_start = 0;
for (unsigned int i=0; i<n_ghost_targets; i++)
{
- MPI_Send (&expanded_ghost_indices[current_index_start],
- ghost_targets_data[i].second, DEAL_II_DOF_INDEX_MPI_TYPE,
- ghost_targets_data[i].first, my_pid,
- communicator);
+ const int ierr = MPI_Send (&expanded_ghost_indices[current_index_start],
+ ghost_targets_data[i].second, DEAL_II_DOF_INDEX_MPI_TYPE,
+ ghost_targets_data[i].first, my_pid,
+ communicator);
+ AssertThrowMPI(ierr);
current_index_start += ghost_targets_data[i].second;
}
AssertDimension (current_index_start, n_ghost_indices_data);
if (import_requests.size()>0)
- MPI_Waitall (import_requests.size(), &import_requests[0],
- MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall (import_requests.size(),
+ &import_requests[0],
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
// transform import indices to local index space and compress
// contiguous indices in form of ranges
if (Utilities::MPI::job_supports_mpi())
{
int communicators_same = 0;
- MPI_Comm_compare (part.communicator, communicator,
- &communicators_same);
+ const int ierr = MPI_Comm_compare (part.communicator, communicator,
+ &communicators_same);
+ AssertThrowMPI(ierr);
if (!(communicators_same == MPI_IDENT ||
communicators_same == MPI_CONGRUENT))
return false;
#ifdef DEAL_II_WITH_MPI
if (sync_wall_time)
- MPI_Barrier(mpi_communicator);
+ {
+ const int ierr = MPI_Barrier(mpi_communicator);
+ AssertThrowMPI(ierr);
+ }
#endif
#if defined(DEAL_II_HAVE_SYS_TIME_H) && defined(DEAL_II_HAVE_SYS_RESOURCE_H)
{
MPI_Comm comm = mpi_comm->GetMpiComm();
*mpi_comm = Epetra_MpiComm(MPI_COMM_SELF);
- MPI_Comm_free (&comm);
+ const int ierr = MPI_Comm_free (&comm);
+ AssertThrowMPI(ierr);
}
#endif
}
// compute the minimum on
// processor zero
- MPI_Reduce (comp, result, 2, MPI_DOUBLE,
- MPI_MIN, 0, mpi_communicator);
+ const int ierr = MPI_Reduce (comp, result, 2, MPI_DOUBLE,
+ MPI_MIN, 0, mpi_communicator);
+ AssertThrowMPI(ierr);
// make sure only processor zero
// got something
double result = 0;
// compute the sum on
// processor zero
- MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE,
- MPI_SUM, 0, mpi_communicator);
+ const int ierr = MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE,
+ MPI_SUM, 0, mpi_communicator);
+ AssertThrowMPI(ierr);
// make sure only processor zero
// got something
do
{
- MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
- master_mpi_rank, mpi_communicator);
+ int ierr = MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+ master_mpi_rank, mpi_communicator);
+ AssertThrowMPI(ierr);
if (interesting_range[0] == interesting_range[1])
return interesting_range[0];
test_threshold));
unsigned int total_count;
- MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED,
- MPI_SUM, master_mpi_rank, mpi_communicator);
+ ierr = MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED,
+ MPI_SUM, master_mpi_rank, mpi_communicator);
+ AssertThrowMPI(ierr);
// now adjust the range. if
// we have too many cells, we
do
{
- MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
- master_mpi_rank, mpi_communicator);
+ int ierr = MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+ master_mpi_rank, mpi_communicator);
+ AssertThrowMPI(ierr);
if (interesting_range[0] == interesting_range[1])
{
// actual largest value
double final_threshold = std::min (interesting_range[0],
global_min_and_max.second);
- MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
- master_mpi_rank, mpi_communicator);
+ ierr = MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
+ master_mpi_rank, mpi_communicator);
+ AssertThrowMPI(ierr);
return final_threshold;
}
my_error += criteria(i);
double total_error;
- MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE,
- MPI_SUM, master_mpi_rank, mpi_communicator);
+ ierr = MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE,
+ MPI_SUM, master_mpi_rank, mpi_communicator);
+ AssertThrowMPI(ierr);
// now adjust the range. if we have too many cells, we take
// the upper half of the previous range, otherwise the lower
// Check that level_ghost_owners is symmetric by sending a message
// to everyone
{
-
- MPI_Barrier(this->mpi_communicator);
+ int ierr = MPI_Barrier(this->mpi_communicator);
+ AssertThrowMPI(ierr);
// important: preallocate to avoid (re)allocation:
std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size());
Assert (typeid(types::subdomain_id)
== typeid(unsigned int),
ExcNotImplemented());
- MPI_Isend(&dummy, 1, MPI_UNSIGNED,
- *it, 9001, this->mpi_communicator,
- &requests[req_counter]);
+ ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED,
+ *it, 9001, this->mpi_communicator,
+ &requests[req_counter]);
+ AssertThrowMPI(ierr);
}
for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
== typeid(unsigned int),
ExcNotImplemented());
unsigned int dummy;
- MPI_Recv(&dummy, 1, MPI_UNSIGNED,
- *it, 9001, this->mpi_communicator,
- MPI_STATUS_IGNORE);
+ ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED,
+ *it, 9001, this->mpi_communicator,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
}
if (requests.size() > 0)
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
- MPI_Barrier(this->mpi_communicator);
+ ierr = MPI_Barrier(this->mpi_communicator);
+ AssertThrowMPI(ierr);
}
#endif
// that the packet has been
// received
it->second.pack_data (*buffer);
- MPI_Isend(&(*buffer)[0], buffer->size(),
- MPI_BYTE, it->first,
- 123, this->get_communicator(), &requests[idx]);
+ const int ierr = MPI_Isend(&(*buffer)[0], buffer->size(),
+ MPI_BYTE, it->first,
+ 123, this->get_communicator(), &requests[idx]);
+ AssertThrowMPI(ierr);
}
Assert(destinations.size()==needs_to_get_cells.size(), ExcInternalError());
{
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status);
- MPI_Get_count(&status, MPI_BYTE, &len);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 123, this->get_communicator(), &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
receive.resize(len);
char *ptr = &receive[0];
- MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- this->get_communicator(), &status);
+ ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ this->get_communicator(), &status);
+ AssertThrowMPI(ierr);
cellinfo.unpack_data(receive);
const unsigned int cells = cellinfo.tree_index.size();
// complete all sends, so that we can
// safely destroy the buffers.
if (requests.size() > 0)
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
//check all msgs got sent and received
Assert(Utilities::MPI::sum(needs_to_get_cells.size(), this->get_communicator())
unsigned int send_value
= number_cache.n_locally_owned_active_cells[my_subdomain];
- MPI_Allgather (&send_value,
- 1,
- MPI_UNSIGNED,
- &number_cache.n_locally_owned_active_cells[0],
- 1,
- MPI_UNSIGNED,
- this->mpi_communicator);
+ const int ierr = MPI_Allgather (&send_value,
+ 1,
+ MPI_UNSIGNED,
+ &number_cache.n_locally_owned_active_cells[0],
+ 1,
+ MPI_UNSIGNED,
+ this->mpi_communicator);
+ AssertThrowMPI(ierr);
number_cache.n_global_active_cells
= std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
types::global_dof_index shift = 0;
//set rcounts based on new_numbers:
int cur_count = new_numbers_copy.size ();
- MPI_Allgather (&cur_count, 1, MPI_INT,
- &rcounts[0], 1, MPI_INT,
- tr->get_communicator ());
+ int ierr = MPI_Allgather (&cur_count, 1, MPI_INT,
+ &rcounts[0], 1, MPI_INT,
+ tr->get_communicator ());
+ AssertThrowMPI(ierr);
for (unsigned int i = 0; i < n_cpu; i++)
{
Assert(((int)new_numbers_copy.size()) ==
rcounts[Utilities::MPI::this_mpi_process (tr->get_communicator ())],
ExcInternalError());
- MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- &gathered_new_numbers[0], &rcounts[0],
- &displs[0],
- DEAL_II_DOF_INDEX_MPI_TYPE,
- tr->get_communicator ());
+ ierr = MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (),
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ &gathered_new_numbers[0], &rcounts[0],
+ &displs[0],
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ tr->get_communicator ());
+ AssertThrowMPI(ierr);
}
// put new numbers according to the current locally_owned_dofs_per_processor IndexSets
// that the packet has been
// received
it->second.pack_data (sendbuffers[idx]);
- MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
- MPI_BYTE, it->first,
- 1100101, tria.get_communicator(), &requests[idx]);
+ const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
+ MPI_BYTE, it->first,
+ 1100101, tria.get_communicator(), &requests[idx]);
+ AssertThrowMPI(ierr);
}
//* receive requests and reply
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status);
- MPI_Get_count(&status, MPI_BYTE, &len);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
receive.resize(len);
char *ptr = &receive[0];
- MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria.get_communicator(), &status);
+ ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tria.get_communicator(), &status);
+ AssertThrowMPI(ierr);
cellinfo.unpack_data(receive);
//send reply
cellinfo.pack_data(reply_buffers[idx]);
- MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(),
- MPI_BYTE, status.MPI_SOURCE,
- 1100102, tria.get_communicator(), &reply_requests[idx]);
+ ierr = MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(),
+ MPI_BYTE, status.MPI_SOURCE,
+ 1100102, tria.get_communicator(), &reply_requests[idx]);
+ AssertThrowMPI(ierr);
}
// * finally receive the replies
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status);
- MPI_Get_count(&status, MPI_BYTE, &len);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
receive.resize(len);
char *ptr = &receive[0];
- MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria.get_communicator(), &status);
+ ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tria.get_communicator(), &status);
+ AssertThrowMPI(ierr);
cellinfo.unpack_data(receive);
if (cellinfo.tree_index.size()==0)
// complete all sends, so that we can
// safely destroy the buffers.
if (requests.size() > 0)
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
if (reply_requests.size() > 0)
- MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
}
// that the packet has been
// received
it->second.pack_data (*buffer);
- MPI_Isend(&(*buffer)[0], buffer->size(),
- MPI_BYTE, it->first,
- 123, tr->get_communicator(), &requests[idx]);
+ const int ierr = MPI_Isend(&(*buffer)[0], buffer->size(),
+ MPI_BYTE, it->first,
+ 123, tr->get_communicator(), &requests[idx]);
+ AssertThrowMPI(ierr);
}
{
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status);
- MPI_Get_count(&status, MPI_BYTE, &len);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
receive.resize(len);
char *ptr = &receive[0];
- MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tr->get_communicator(), &status);
+ ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tr->get_communicator(), &status);
+ AssertThrowMPI(ierr);
typename types<dim>::cellinfo cellinfo;
cellinfo.unpack_data(receive);
// complete all sends, so that we can
// safely destroy the buffers.
if (requests.size() > 0)
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
#ifdef DEBUG
unsigned int sent=needs_to_get_cells.size();
unsigned int recv=senders.size();
- MPI_Allreduce(&sent, &sum_send, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
- MPI_Allreduce(&recv, &sum_recv, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
+ int ierr = MPI_Allreduce(&sent, &sum_send, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
+ AssertThrowMPI(ierr);
+ ierr = MPI_Allreduce(&recv, &sum_recv, 1, MPI_UNSIGNED, MPI_SUM, tr->get_communicator());
+ AssertThrowMPI(ierr);
Assert(sum_send==sum_recv, ExcInternalError());
}
#endif
// processors from which we expect
// messages, and by using different
// tags for phase 1 and 2
- MPI_Barrier(tr->get_communicator());
+ const int ierr = MPI_Barrier(tr->get_communicator());
+ AssertThrowMPI(ierr);
#endif
}
//shift ids to make them unique
number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus);
- MPI_Allgather ( &number_cache.n_locally_owned_dofs,
- 1, DEAL_II_DOF_INDEX_MPI_TYPE,
- &number_cache.n_locally_owned_dofs_per_processor[0],
- 1, DEAL_II_DOF_INDEX_MPI_TYPE,
- tr->get_communicator());
+ const int ierr = MPI_Allgather ( &number_cache.n_locally_owned_dofs,
+ 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ &number_cache.n_locally_owned_dofs_per_processor[0],
+ 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ tr->get_communicator());
+ AssertThrowMPI(ierr);
const dealii::types::global_dof_index
shift = std::accumulate (number_cache
//shift ids to make them unique
number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus);
- MPI_Allgather ( &number_cache.n_locally_owned_dofs,
- 1, DEAL_II_DOF_INDEX_MPI_TYPE,
- &number_cache.n_locally_owned_dofs_per_processor[0],
- 1, DEAL_II_DOF_INDEX_MPI_TYPE,
- tr->get_communicator());
+ int ierr = MPI_Allgather ( &number_cache.n_locally_owned_dofs,
+ 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ &number_cache.n_locally_owned_dofs_per_processor[0],
+ 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ tr->get_communicator());
+ AssertThrowMPI(ierr);
const dealii::types::global_dof_index
shift = std::accumulate (number_cache
// This barrier is crucial so that messages between phase 1&2 don't
// mix.
- MPI_Barrier(tr->get_communicator());
+ const int ierr = MPI_Barrier(tr->get_communicator());
+ AssertThrowMPI(ierr);
// Phase 2, only request the cells that were not completed in Phase
// 1.
my_data.resize(max_size);
std::vector<char> buffer(max_size*n_cpus);
- MPI_Allgather(&my_data[0], max_size, MPI_BYTE,
- &buffer[0], max_size, MPI_BYTE,
- tr->get_communicator());
+ const int ierr = MPI_Allgather(&my_data[0], max_size, MPI_BYTE,
+ &buffer[0], max_size, MPI_BYTE,
+ tr->get_communicator());
+ AssertThrowMPI(ierr);
number_cache.locally_owned_dofs_per_processor.resize (n_cpus);
number_cache.n_locally_owned_dofs_per_processor.resize (n_cpus);
all_dof_counts(fe_collection.n_components() *
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
- MPI_Allgather ( &local_dof_count[0],
- n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- &all_dof_counts[0],
- n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- tria->get_communicator());
+ const int ierr = MPI_Allgather ( &local_dof_count[0],
+ n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+ &all_dof_counts[0],
+ n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+ tria->get_communicator());
+ AssertThrowMPI(ierr);
for (unsigned int i=0; i<n_buckets; ++i)
Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
all_dof_counts(fe_collection.n_components() *
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
- MPI_Allgather ( &local_dof_count[0],
- n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- &all_dof_counts[0],
- n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- tria->get_communicator());
+ const int ierr = MPI_Allgather ( &local_dof_count[0],
+ n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+ &all_dof_counts[0],
+ n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+ tria->get_communicator());
+ AssertThrowMPI(ierr);
for (unsigned int i=0; i<n_buckets; ++i)
Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
{
std::vector<types::global_dof_index> local_dof_count = dofs_per_component;
- MPI_Allreduce ( &local_dof_count[0], &dofs_per_component[0], n_target_components,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM, tria->get_communicator());
+ const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_component[0], n_target_components,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ MPI_SUM, tria->get_communicator());
+ AssertThrowMPI (ierr);
}
#endif
}
(&dof_handler.get_triangulation())))
{
std::vector<types::global_dof_index> local_dof_count = dofs_per_block;
- MPI_Allreduce ( &local_dof_count[0], &dofs_per_block[0],
- n_target_blocks,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM, tria->get_communicator());
+ const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_block[0],
+ n_target_blocks,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ MPI_SUM, tria->get_communicator());
+ AssertThrowMPI (ierr);
}
#endif
}
destinations.push_back (it->receiver);
it->pack_data (*buffer);
- MPI_Isend (&(*buffer)[0], buffer->size(),
- MPI_BYTE,
- it->receiver,
- round,
- communicator,
- &requests[idx]);
+ const int ierr = MPI_Isend (&(*buffer)[0], buffer->size(),
+ MPI_BYTE,
+ it->receiver,
+ round,
+ communicator,
+ &requests[idx]);
+ AssertThrowMPI(ierr);
}
Assert(destinations.size()==cells_to_send.size(), ExcInternalError());
{
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status);
- MPI_Get_count(&status, MPI_BYTE, &len);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, round, communicator, &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
receive.resize (len);
char *buf = &receive[0];
- MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status);
+ ierr = MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status);
+ AssertThrowMPI(ierr);
cell_data.unpack_data (receive);
}
if (requests.size () > 0)
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
// finally sort the list of cells
std::sort (received_cells.begin (), received_cells.end ());
// processors and shifting the indices accordingly
const unsigned int n_cpu = Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
std::vector<types::global_vertex_index> indices(n_cpu);
- MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0],
- indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator());
+ int ierr = MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0],
+ indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator());
+ AssertThrowMPI(ierr);
const types::global_vertex_index shift = std::accumulate(&indices[0],
&indices[0]+triangulation.locally_owned_subdomain(),0);
}
// Send the message
- MPI_Isend(&vertices_send_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
- destination, 0, triangulation.get_communicator(), &first_requests[i]);
+ ierr = MPI_Isend(&vertices_send_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
+ destination, 0, triangulation.get_communicator(), &first_requests[i]);
+ AssertThrowMPI(ierr);
}
// Receive the first message
vertices_recv_buffers[i].resize(buffer_size);
// Receive the message
- MPI_Recv(&vertices_recv_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
- source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+ ierr = MPI_Recv(&vertices_recv_buffers[i][0],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
+ source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
}
}
// Send the message
- MPI_Isend(&cellids_send_buffers[i][0], buffer_size, MPI_CHAR,
- destination, 0, triangulation.get_communicator(), &second_requests[i]);
+ ierr = MPI_Isend(&cellids_send_buffers[i][0], buffer_size, MPI_CHAR,
+ destination, 0, triangulation.get_communicator(), &second_requests[i]);
+ AssertThrowMPI(ierr);
}
// Receive the second message
cellids_recv_buffers[i].resize(buffer_size);
// Receive the message
- MPI_Recv(&cellids_recv_buffers[i][0],buffer_size, MPI_CHAR,
- source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+ ierr = MPI_Recv(&cellids_recv_buffers[i][0],buffer_size, MPI_CHAR,
+ source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
}
void
MatrixBase::compress (const VectorOperation::values operation)
{
+ int ierr;
+ (void)ierr;
#ifdef DEBUG
#ifdef DEAL_II_WITH_MPI
// Check that all processors agree that last_action is the same (or none!)
int my_int_last_action = last_action;
int all_int_last_action;
- MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
- MPI_BOR, get_mpi_communicator());
+ ierr = MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
+ MPI_BOR, get_mpi_communicator());
+ AssertThrowMPI(ierr);
AssertThrow(all_int_last_action != (VectorOperation::add | VectorOperation::insert),
ExcMessage("Error: not all processors agree on the last VectorOperation before this compress() call."));
ExcMessage("Missing compress() or calling with wrong VectorOperation argument."));
// flush buffers
- int ierr;
ierr = MatAssemblyBegin (matrix,MAT_FINAL_ASSEMBLY);
AssertThrow (ierr == 0, ExcPETScError(ierr));
// mismatch (may not be true for every proc)
int k_global, k = ((size() != n) || (local_size() != local_sz));
- MPI_Allreduce (&k, &k_global, 1,
- MPI_INT, MPI_LOR, communicator);
+ int ierr = MPI_Allreduce (&k, &k_global, 1,
+ MPI_INT, MPI_LOR, communicator);
+ AssertThrowMPI(ierr);
if (k_global || has_ghost_elements())
{
// AssertThrow (ierr == 0, ExcPETScError(ierr));
// so let's go the slow way:
- int ierr;
#if DEAL_II_PETSC_VERSION_LT(3,2,0)
ierr = VecDestroy (vector);
i++)
{
// This is slow, but most likely only used to debug.
- MPI_Barrier(communicator);
+ ierr = MPI_Barrier(communicator);
+ AssertThrowMPI(ierr);
if (i == Utilities::MPI::this_mpi_process(communicator))
{
if (across)
void
VectorBase::compress (const VectorOperation::values operation)
{
+ int ierr;
+ (void)ierr;
#ifdef DEBUG
#ifdef DEAL_II_WITH_MPI
// Check that all processors agree that last_action is the same (or none!)
int my_int_last_action = last_action;
int all_int_last_action;
- MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
- MPI_BOR, get_mpi_communicator());
+ ierr = MPI_Allreduce(&my_int_last_action, &all_int_last_action, 1, MPI_INT,
+ MPI_BOR, get_mpi_communicator());
+ AssertThrowMPI(ierr);
AssertThrow(all_int_last_action != (::dealii::VectorOperation::add | ::dealii::VectorOperation::insert),
ExcMessage("Error: not all processors agree on the last VectorOperation before this compress() call."));
// we still need to call
// VecAssemblyBegin/End on all
// processors.
- int ierr;
ierr = VecAssemblyBegin(vector);
AssertThrow (ierr == 0, ExcPETScError(ierr));
ierr = VecAssemblyEnd(vector);
{
unsigned int idx=0;
for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
- MPI_Isend(&(it->second[0]),
- it->second.size(),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- it->first,
- 124,
- mpi_comm,
- &requests[idx]);
+ {
+ const int ierr = MPI_Isend(&(it->second[0]),
+ it->second.size(),
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ it->first,
+ 124,
+ mpi_comm,
+ &requests[idx]);
+ AssertThrowMPI(ierr);
+ }
}
{
{
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+ AssertThrowMPI(ierr);
Assert (status.MPI_TAG==124, ExcInternalError());
- MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+ ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+ AssertThrowMPI(ierr);
recv_buf.resize(len);
- MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
- status.MPI_TAG, mpi_comm, &status);
+ ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
+ status.MPI_TAG, mpi_comm, &status);
+ AssertThrowMPI(ierr);
std::vector<DynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
std::vector<DynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
// complete all sends, so that we can safely destroy the buffers.
if (requests.size())
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
}
{
unsigned int idx=0;
for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
- MPI_Isend(&(it->second[0]),
- it->second.size(),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- it->first,
- 124,
- mpi_comm,
- &requests[idx]);
+ {
+ const int ierr = MPI_Isend(&(it->second[0]),
+ it->second.size(),
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ it->first,
+ 124,
+ mpi_comm,
+ &requests[idx]);
+ AssertThrowMPI(ierr);
+ }
}
{
{
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+ AssertThrowMPI(ierr);
Assert (status.MPI_TAG==124, ExcInternalError());
- MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+ ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+ AssertThrowMPI(ierr);
recv_buf.resize(len);
- MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
- status.MPI_TAG, mpi_comm, &status);
+ ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
+ status.MPI_TAG, mpi_comm, &status);
+ AssertThrowMPI(ierr);
std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
// complete all sends, so that we can safely destroy the buffers.
if (requests.size())
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ {
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
}
#endif
}
// the receiving end will be waiting. In that case we just send
// an empty message.
if (data.size())
- MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+ {
+ const int ierr = MPI_Isend(&data[0], data.size()*sizeof(data[0]),
+ MPI_BYTE, dest, 71, tria->get_communicator(),
+ &*requests.rbegin());
+ AssertThrowMPI(ierr);
+ }
else
- MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+ {
+ const int ierr = MPI_Isend(NULL, 0, MPI_BYTE, dest, 71,
+ tria->get_communicator(), &*requests.rbegin());
+ AssertThrowMPI(ierr);
+ }
}
}
{
MPI_Status status;
int len;
- MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
- MPI_Get_count(&status, MPI_BYTE, &len);
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
if (len==0)
{
- int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria->get_communicator(), &status);
- AssertThrow(err==MPI_SUCCESS, ExcInternalError());
+ ierr = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tria->get_communicator(), &status);
+ AssertThrowMPI(ierr);
continue;
}
receive_buffer.resize(count);
void *ptr = &receive_buffer[0];
- int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria->get_communicator(), &status);
- AssertThrow(err==MPI_SUCCESS, ExcInternalError());
+ ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tria->get_communicator(), &status);
+ AssertThrowMPI(ierr);
for (unsigned int i=0; i<receive_buffer.size(); ++i)
{
// * wait for all MPI_Isend to complete
if (requests.size() > 0)
{
- MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
requests.clear();
}
#ifdef DEBUG
// Make sure in debug mode, that everybody sent/received all packages
// on this level. If a deadlock occurs here, the list of expected
// senders is not computed correctly.
- MPI_Barrier(tria->get_communicator());
+ const int ierr = MPI_Barrier(tria->get_communicator());
+ AssertThrowMPI(ierr);
#endif
}
#endif