https://gitweb.dealii.org/ - dealii.git/commitdiff
Replace MPI_Comm functions by those of the Utilities namespace. 6894/head
author    Marc Fehling <m.fehling@fz-juelich.de>
Thu, 5 Jul 2018 21:56:30 +0000 (15:56 -0600)
committer Marc Fehling <m.fehling@fz-juelich.de>
Sun, 8 Jul 2018 04:13:16 +0000 (22:13 -0600)
source/base/data_out_base.cc
source/base/exceptions.cc
source/base/process_grid.cc
source/distributed/tria.cc
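
Every hunk below applies the same substitution: raw MPI_Comm_rank/MPI_Comm_size calls with hand-written AssertThrowMPI checks give way to deal.II's Utilities::MPI::this_mpi_process() and Utilities::MPI::n_mpi_processes() wrappers, which perform the same error checking internally and return the rank and size as unsigned int. A minimal sketch of the pattern, outside any particular file of this commit (the function name and the communicator are placeholders, not taken from the diff):

    #include <deal.II/base/mpi.h>

    void report_rank_and_size(const MPI_Comm comm)
    {
      // Before this commit: raw MPI calls, each return code checked by hand.
      //   int myrank, nproc;
      //   int ierr = MPI_Comm_rank(comm, &myrank);
      //   AssertThrowMPI(ierr);
      //   ierr = MPI_Comm_size(comm, &nproc);
      //   AssertThrowMPI(ierr);

      // After: the deal.II wrappers; the MPI return codes are checked inside.
      const unsigned int myrank = dealii::Utilities::MPI::this_mpi_process(comm);
      const unsigned int nproc  = dealii::Utilities::MPI::n_mpi_processes(comm);

      (void)myrank; // only a sketch; the real call sites go on to use these values
      (void)nproc;
    }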

diff --git a/source/base/data_out_base.cc b/source/base/data_out_base.cc
index a6f1f8619711994260932dd23ce9246b8a5aaf7e..43b975fa21a9d8817f2116c3438b3a5f2a719791 100644
@@ -7273,14 +7273,10 @@ DataOutInterface<dim, spacedim>::write_vtu_in_parallel(const char *filename,
   write_vtu(f);
 #else
 
-  int myrank, nproc;
-  int ierr = MPI_Comm_rank(comm, &myrank);
-  AssertThrowMPI(ierr);
-  ierr = MPI_Comm_size(comm, &nproc);
-  AssertThrowMPI(ierr);
+  const int myrank = Utilities::MPI::this_mpi_process(comm);
 
   MPI_Info info;
-  ierr = MPI_Info_create(&info);
+  int ierr = MPI_Info_create(&info);
   AssertThrowMPI(ierr);
   MPI_File fh;
   ierr = MPI_File_open(comm,
@@ -7405,7 +7401,6 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
   MPI_Comm                          comm) const
 {
   unsigned int local_node_cell_count[2], global_node_cell_count[2];
-  int          myrank;
 
 #ifndef DEAL_II_WITH_HDF5
   // throw an exception, but first make sure the compiler does not warn about
@@ -7425,18 +7420,17 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
 
   // And compute the global total
 #ifdef DEAL_II_WITH_MPI
-  int ierr = MPI_Comm_rank(comm, &myrank);
-  AssertThrowMPI(ierr);
-  ierr = MPI_Allreduce(local_node_cell_count,
-                       global_node_cell_count,
-                       2,
-                       MPI_UNSIGNED,
-                       MPI_SUM,
-                       comm);
+  const int myrank = Utilities::MPI::this_mpi_process(comm);
+  int       ierr   = MPI_Allreduce(local_node_cell_count,
+                           global_node_cell_count,
+                           2,
+                           MPI_UNSIGNED,
+                           MPI_SUM,
+                           comm);
   AssertThrowMPI(ierr);
 #else
   (void)comm;
-  myrank = 0;
+  const int myrank = 0;
   global_node_cell_count[0] = local_node_cell_count[0];
   global_node_cell_count[1] = local_node_cell_count[1];
 #endif
@@ -7477,14 +7471,11 @@ DataOutInterface<dim, spacedim>::write_xdmf_file(
   const std::string &           filename,
   MPI_Comm                      comm) const
 {
-  int myrank;
-
 #ifdef DEAL_II_WITH_MPI
-  const int ierr = MPI_Comm_rank(comm, &myrank);
-  AssertThrowMPI(ierr);
+  const int myrank = Utilities::MPI::this_mpi_process(comm);
 #else
   (void)comm;
-  myrank = 0;
+  const int myrank = 0;
 #endif
 
   // Only rank 0 process writes the XDMF file
@@ -7780,9 +7771,7 @@ DataOutBase::write_hdf5_parallel(
   // If HDF5 is not parallel and we're using multiple processes, abort
 #  ifndef H5_HAVE_PARALLEL
 #    ifdef DEAL_II_WITH_MPI
-  int world_size;
-  ierr = MPI_Comm_size(comm, &world_size);
-  AssertThrowMPI(ierr);
+  int world_size = Utilities::MPI::n_mpi_processes(comm);
   AssertThrow(
     world_size <= 1,
     ExcMessage(
diff --git a/source/base/exceptions.cc b/source/base/exceptions.cc
index c9f83a897acf31adda7ef5f6b6df9024c32986f8..752980fca0f017319ef8064ba54f02f3c25b17c7 100644
@@ -424,8 +424,7 @@ namespace
       {
         // do the same as in Utilities::MPI::n_mpi_processes() here,
         // but without error checking to not throw again.
-        int n_proc = 1;
-        MPI_Comm_size(MPI_COMM_WORLD, &n_proc);
+        const int n_proc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
         if (n_proc > 1)
           {
             std::cerr
diff --git a/source/base/process_grid.cc b/source/base/process_grid.cc
index d0fecc6e447f239639f96da7ae9e4741efeb8a20..dcb77b7162df9e32eb93d1c7c616a25a78639424 100644
@@ -51,8 +51,7 @@ namespace
 
     // Below we always try to create 2D processor grids:
 
-    int n_processes;
-    MPI_Comm_size(mpi_comm, &n_processes);
+    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);
 
     // Get the total number of cores we can occupy in a rectangular dense matrix
     // with rectangular blocks when every core owns only a single block:
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index c4b959893be0ff5630ba35a62a492da6902d9fb4..5794e2c300ad4df899a9acf2cc27537c95b13ffc 100644
@@ -1493,16 +1493,12 @@ namespace parallel
       const std::string fname_fixed = std::string(filename) + "_fixed.data";
 
       // ----- copied -----
-      // from DataOutInterface::write_vtu_parallel
+      // from DataOutInterface::write_vtu_in_parallel
       // TODO: write general MPIIO interface
-      int myrank, nproc;
-      int ierr = MPI_Comm_rank(mpi_communicator, &myrank);
-      AssertThrowMPI(ierr);
-      ierr = MPI_Comm_size(mpi_communicator, &nproc);
-      AssertThrowMPI(ierr);
+      const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
 
       MPI_Info info;
-      ierr = MPI_Info_create(&info);
+      int      ierr = MPI_Info_create(&info);
       AssertThrowMPI(ierr);
       MPI_File fh;
       ierr = MPI_File_open(mpi_communicator,
@@ -1573,16 +1569,12 @@ namespace parallel
       const std::string fname_fixed = std::string(filename) + "_fixed.data";
 
       // ----- copied -----
-      // from DataOutInterface::write_vtu_parallel
+      // from DataOutInterface::write_vtu_in_parallel
       // TODO: write general MPIIO interface
-      int myrank, nproc;
-      int ierr = MPI_Comm_rank(mpi_communicator, &myrank);
-      AssertThrowMPI(ierr);
-      ierr = MPI_Comm_size(mpi_communicator, &nproc);
-      AssertThrowMPI(ierr);
+      const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
 
       MPI_Info info;
-      ierr = MPI_Info_create(&info);
+      int      ierr = MPI_Info_create(&info);
       AssertThrowMPI(ierr);
       MPI_File fh;
       ierr = MPI_File_open(mpi_communicator,
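
Both hunks above, in the save and load paths of source/distributed/tria.cc, modify MPI-IO setup code that was copied from DataOutInterface::write_vtu_in_parallel; only the rank is still needed there, so the process-count query is dropped. For orientation, a sketch of how that preamble reads after the change; the MPI_File_open arguments that fall outside the hunks (notably the access mode) are assumptions, not taken from this diff:

      const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);

      MPI_Info info;
      int      ierr = MPI_Info_create(&info);
      AssertThrowMPI(ierr);

      MPI_File fh;
      ierr = MPI_File_open(mpi_communicator,
                           fname_fixed.c_str(),
                           MPI_MODE_CREATE | MPI_MODE_WRONLY, // assumed access mode
                           info,
                           &fh);
      AssertThrowMPI(ierr);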
