write_vtu(f);
#else
- int myrank, nproc;
- int ierr = MPI_Comm_rank(comm, &myrank);
- AssertThrowMPI(ierr);
- ierr = MPI_Comm_size(comm, &nproc);
- AssertThrowMPI(ierr);
+ const int myrank = Utilities::MPI::this_mpi_process(comm);
MPI_Info info;
- ierr = MPI_Info_create(&info);
+ int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
MPI_File fh;
ierr = MPI_File_open(comm,
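For context, the wrappers this diff switches to are thin layers over the raw MPI calls. Below is a minimal, self-contained sketch of such a wrapper; the helper name my_this_mpi_process is made up, and the real Utilities::MPI::this_mpi_process() in deal.II additionally handles builds and runs without MPI.

    // Sketch only, not the deal.II implementation.
    #include <mpi.h>

    #include <iostream>

    // Hypothetical stand-in for Utilities::MPI::this_mpi_process(): query the
    // rank and abort on failure instead of returning an error code.
    unsigned int my_this_mpi_process(const MPI_Comm comm)
    {
      int rank = 0;
      const int ierr = MPI_Comm_rank(comm, &rank);
      if (ierr != MPI_SUCCESS) // plays the role of AssertThrowMPI(ierr)
        MPI_Abort(comm, ierr);
      return static_cast<unsigned int>(rank);
    }

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      std::cout << "rank " << my_this_mpi_process(MPI_COMM_WORLD) << '\n';
      MPI_Finalize();
      return 0;
    }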
MPI_Comm comm) const
{
unsigned int local_node_cell_count[2], global_node_cell_count[2];
- int myrank;
#ifndef DEAL_II_WITH_HDF5
// throw an exception, but first make sure the compiler does not warn about
// And compute the global total
#ifdef DEAL_II_WITH_MPI
- int ierr = MPI_Comm_rank(comm, &myrank);
- AssertThrowMPI(ierr);
- ierr = MPI_Allreduce(local_node_cell_count,
- global_node_cell_count,
- 2,
- MPI_UNSIGNED,
- MPI_SUM,
- comm);
+ const int myrank = Utilities::MPI::this_mpi_process(comm);
+ int ierr = MPI_Allreduce(local_node_cell_count,
+ global_node_cell_count,
+ 2,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ comm);
AssertThrowMPI(ierr);
#else
(void)comm;
- myrank = 0;
+ const int myrank = 0;
global_node_cell_count[0] = local_node_cell_count[0];
global_node_cell_count[1] = local_node_cell_count[1];
#endif
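The reduction above is the standard "sum local counts into global counts on every rank" pattern. Here is a self-contained sketch of it outside of deal.II, with made-up local counts; the explicit check of the return code stands in for AssertThrowMPI(ierr).

    #include <mpi.h>

    #include <iostream>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      unsigned int local_node_cell_count[2]  = {100, 80}; // made-up local data
      unsigned int global_node_cell_count[2] = {0, 0};

      const int ierr = MPI_Allreduce(local_node_cell_count,
                                     global_node_cell_count,
                                     2,
                                     MPI_UNSIGNED,
                                     MPI_SUM,
                                     MPI_COMM_WORLD);
      if (ierr != MPI_SUCCESS) // plays the role of AssertThrowMPI(ierr)
        MPI_Abort(MPI_COMM_WORLD, ierr);

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank == 0)
        std::cout << "global nodes: " << global_node_cell_count[0]
                  << ", global cells: " << global_node_cell_count[1] << '\n';

      MPI_Finalize();
      return 0;
    }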
const std::string & filename,
MPI_Comm comm) const
{
- int myrank;
-
#ifdef DEAL_II_WITH_MPI
- const int ierr = MPI_Comm_rank(comm, &myrank);
- AssertThrowMPI(ierr);
+ const int myrank = Utilities::MPI::this_mpi_process(comm);
#else
(void)comm;
- myrank = 0;
+ const int myrank = 0;
#endif
// Only rank 0 process writes the XDMF file
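The comment above describes the usual split between the bulk data (written collectively) and the small XDMF meta file (written by rank 0 alone). A minimal sketch of the rank-0-only part, with a made-up file name:

    #include <mpi.h>

    #include <fstream>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      int myrank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
      if (myrank == 0) // the meta file is small, so one rank writes it serially
        {
          std::ofstream xdmf("solution.xdmf"); // hypothetical file name
          xdmf << "<?xml version=\"1.0\" ?>\n";
        }
      MPI_Finalize();
      return 0;
    }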
// If HDF5 is not parallel and we're using multiple processes, abort
# ifndef H5_HAVE_PARALLEL
# ifdef DEAL_II_WITH_MPI
- int world_size;
- ierr = MPI_Comm_size(comm, &world_size);
- AssertThrowMPI(ierr);
+ const int world_size = Utilities::MPI::n_mpi_processes(comm);
AssertThrow(
world_size <= 1,
ExcMessage(
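The guard above refuses to run a serial-only HDF5 build on more than one rank instead of producing a broken file. A sketch of the same check outside of deal.II; the abort stands in for AssertThrow/ExcMessage, and in deal.II the check is compiled only when H5_HAVE_PARALLEL is not defined.

    #include <mpi.h>

    #include <cstdio>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      int world_size = 1;
      MPI_Comm_size(MPI_COMM_WORLD, &world_size);
      // Serial HDF5 cannot have many ranks write a single file; bail out early.
      if (world_size > 1)
        {
          std::fprintf(stderr,
                       "Serial HDF5 build cannot be used with %d MPI ranks.\n",
                       world_size);
          MPI_Abort(MPI_COMM_WORLD, 1);
        }
      MPI_Finalize();
      return 0;
    }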
{
// do the same as in Utilities::MPI::n_mpi_processes() here,
// but without error checking to not throw again.
int n_proc = 1;
MPI_Comm_size(MPI_COMM_WORLD, &n_proc);
if (n_proc > 1)
{
std::cerr
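As the comment above says, this path deliberately avoids the error-checking wrapper: Utilities::MPI::n_mpi_processes() calls AssertThrowMPI() and could therefore throw while an error is already being handled. A minimal sketch of the non-throwing query as one would use it in such an abort path (the helper name is made up):

    #include <mpi.h>

    #include <cstdio>
    #include <cstdlib>

    // Sketch: query the process count without any error handling that could
    // throw, suitable for code that runs while an exception is already active.
    void abort_with_mpi_if_parallel() // hypothetical helper
    {
      int n_proc = 1;
      MPI_Comm_size(MPI_COMM_WORLD, &n_proc); // return code deliberately ignored
      if (n_proc > 1)
        {
          std::fprintf(stderr, "Calling MPI_Abort on %d processes.\n", n_proc);
          MPI_Abort(MPI_COMM_WORLD, 1);
        }
      std::abort();
    }

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      // On a fatal, unrecoverable error one would call:
      //   abort_with_mpi_if_parallel();
      MPI_Finalize();
      return 0;
    }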
// Below we always try to create 2D processor grids:
- int n_processes;
- MPI_Comm_size(mpi_comm, &n_processes);
+ const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);
// Get the total number of cores we can occupy in a rectangular dense matrix
// with rectangular blocks when every core owns only a single block:
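The comment above concerns choosing a 2D process grid for ScaLAPACK-style block distributions. Below is a self-contained sketch of one way to turn a process count into a near-square n_rows x n_cols grid; the actual deal.II ProcessGrid logic additionally takes the matrix and block sizes into account.

    #include <mpi.h>

    #include <cmath>
    #include <iostream>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      int n_processes = 1;
      MPI_Comm_size(MPI_COMM_WORLD, &n_processes);

      // Pick the largest n_rows <= sqrt(n_processes) that divides n_processes,
      // so that n_rows * n_cols == n_processes and the grid is as square as
      // possible.
      int n_rows = static_cast<int>(std::sqrt(static_cast<double>(n_processes)));
      while (n_processes % n_rows != 0)
        --n_rows;
      const int n_cols = n_processes / n_rows;

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank == 0)
        std::cout << n_processes << " processes -> " << n_rows << " x "
                  << n_cols << " grid\n";

      MPI_Finalize();
      return 0;
    }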
const std::string fname_fixed = std::string(filename) + "_fixed.data";
// ----- copied -----
- // from DataOutInterface::write_vtu_parallel
+ // from DataOutInterface::write_vtu_in_parallel
// TODO: write general MPIIO interface
- int myrank, nproc;
- int ierr = MPI_Comm_rank(mpi_communicator, &myrank);
- AssertThrowMPI(ierr);
- ierr = MPI_Comm_size(mpi_communicator, &nproc);
- AssertThrowMPI(ierr);
+ const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
MPI_Info info;
- ierr = MPI_Info_create(&info);
+ int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
MPI_File fh;
ierr = MPI_File_open(mpi_communicator,
const std::string fname_fixed = std::string(filename) + "_fixed.data";
// ----- copied -----
- // from DataOutInterface::write_vtu_parallel
+ // from DataOutInterface::write_vtu_in_parallel
// TODO: write general MPIIO interface
- int myrank, nproc;
- int ierr = MPI_Comm_rank(mpi_communicator, &myrank);
- AssertThrowMPI(ierr);
- ierr = MPI_Comm_size(mpi_communicator, &nproc);
- AssertThrowMPI(ierr);
+ const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
MPI_Info info;
- ierr = MPI_Info_create(&info);
+ int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
MPI_File fh;
ierr = MPI_File_open(mpi_communicator,
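Both hunks above copy the same MPI-IO boilerplate. For reference, here is a self-contained sketch of that create-info/open/truncate/write/close sequence with a made-up file name; the error checking that deal.II performs with AssertThrowMPI() after every call is omitted for brevity.

    #include <mpi.h>

    #include <cstring>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      int myrank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

      MPI_Info info;
      MPI_Info_create(&info);

      char filename[] = "example_fixed.data"; // hypothetical file name
      MPI_File fh;
      MPI_File_open(MPI_COMM_WORLD,
                    filename,
                    MPI_MODE_CREATE | MPI_MODE_WRONLY,
                    info,
                    &fh);

      MPI_File_set_size(fh, 0);    // discard any previous file contents
      MPI_Barrier(MPI_COMM_WORLD); // nobody writes before the truncation is done

      if (myrank == 0)
        {
          const char header[] = "header\n";
          MPI_File_write_at(fh,
                            0,
                            header,
                            static_cast<int>(std::strlen(header)),
                            MPI_CHAR,
                            MPI_STATUS_IGNORE);
        }

      MPI_File_close(&fh);
      MPI_Info_free(&info);
      MPI_Finalize();
      return 0;
    }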