https://gitweb.dealii.org/ - dealii.git/commitdiff
Move job_supports_mpi() to a more central place
author Martin Kronbichler <martin.kronbichler@uni-a.de>
Wed, 28 Jun 2023 17:46:18 +0000 (19:46 +0200)
committer Martin Kronbichler <martin.kronbichler@uni-a.de>
Wed, 28 Jun 2023 21:36:19 +0000 (23:36 +0200)
include/deal.II/base/mpi.templates.h
include/deal.II/base/mpi_consensus_algorithms.h
include/deal.II/base/mpi_remote_point_evaluation.h
include/deal.II/lac/affine_constraints.templates.h
include/deal.II/lac/la_parallel_block_vector.templates.h
include/deal.II/lac/la_parallel_vector.templates.h
include/deal.II/matrix_free/matrix_free.templates.h
source/base/mpi.cc
source/base/mpi_compute_index_owner_internal.cc
source/base/partitioner.cc
source/dofs/number_cache.cc
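
The common thread of the changes below is that the job_supports_mpi() check moves into Utilities::MPI::n_mpi_processes() and this_mpi_process() themselves, so individual call sites no longer need to guard against a run in which MPI was never initialized. A condensed caller-side sketch of the resulting pattern (the function name run_reduction_step is illustrative, not part of the patch):

#include <deal.II/base/mpi.h>

void run_reduction_step(const MPI_Comm comm)
{
  // Old pattern, still valid but now redundant:
  //   if (Utilities::MPI::job_supports_mpi() &&
  //       Utilities::MPI::n_mpi_processes(comm) > 1)
  //
  // New pattern: n_mpi_processes() checks job_supports_mpi() internally
  // and returns 1 in a serial run, so a single test suffices.
  if (dealii::Utilities::MPI::n_mpi_processes(comm) > 1)
    {
      // parallel path: MPI communication goes here
    }
  else
    {
      // serial path: nothing to communicate
    }
}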

include/deal.II/base/mpi.templates.h
index 3be70a21fde823b076208a21a04e0a0425e3650c..64a93dd632ebe4653dac4e2fc1546b99404a6035 100644 (file)
@@ -406,7 +406,7 @@ namespace Utilities
            const unsigned int                            root_process)
     {
 #ifdef DEAL_II_WITH_MPI
-      if (job_supports_mpi() && n_mpi_processes(comm) > 1)
+      if (n_mpi_processes(comm) > 1)
         {
           // 1) perform custom reduction
           T result = vec;
@@ -488,7 +488,7 @@ namespace Utilities
                const MPI_Comm                                comm,
                const std::function<T(const T &, const T &)> &combiner)
     {
-      if (job_supports_mpi() && n_mpi_processes(comm) > 1)
+      if (n_mpi_processes(comm) > 1)
         {
           // 1) perform reduction
           const auto result = Utilities::MPI::reduce<T>(vec, comm, combiner);
include/deal.II/base/mpi_consensus_algorithms.h
index bde1118f1cfc12c4fa5a6d78b51a5c1e2459c4ec..8f24df3370dfd9e6e6bc0f43f1c8b9bd3d8d7db1 100644 (file)
@@ -2405,8 +2405,7 @@ namespace Utilities
         const MPI_Comm comm)
       {
         (void)comm;
-        Assert((Utilities::MPI::job_supports_mpi() == false) ||
-                 (Utilities::MPI::n_mpi_processes(comm) == 1),
+        Assert(Utilities::MPI::n_mpi_processes(comm) == 1,
                ExcMessage("You shouldn't use the 'Serial' class on "
                           "communicators that have more than one process "
                           "associated with it."));
include/deal.II/base/mpi_remote_point_evaluation.h
index b41279063084ca639481a62bd4eb5ab1af971508..7b6c53def07d52064ea7c545f191aaf4e924354c 100644 (file)
@@ -476,10 +476,13 @@ namespace Utilities
         }
 
       // make sure all messages have been sent
-      const int ierr = MPI_Waitall(send_requests.size(),
-                                   send_requests.data(),
-                                   MPI_STATUSES_IGNORE);
-      AssertThrowMPI(ierr);
+      if (!send_requests.empty())
+        {
+          const int ierr = MPI_Waitall(send_requests.size(),
+                                       send_requests.data(),
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
 #endif
     }
 
@@ -644,10 +647,13 @@ namespace Utilities
             buffer_eval[send_permutation_inv[i]] = recv_buffer_unpacked[c];
         }
 
-      const int ierr = MPI_Waitall(send_requests.size(),
-                                   send_requests.data(),
-                                   MPI_STATUSES_IGNORE);
-      AssertThrowMPI(ierr);
+      if (!send_requests.empty())
+        {
+          const int ierr = MPI_Waitall(send_requests.size(),
+                                       send_requests.data(),
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
 
       // evaluate function at points
       evaluation_function(buffer_eval, cell_data);
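
The two hunks above wrap MPI_Waitall() in a check for an empty request vector, presumably so that no MPI routine is invoked at all when nothing was posted (for instance in a purely serial run where MPI was never initialized). A minimal sketch of the same guard pattern in plain MPI, with an illustrative helper name not taken from deal.II:

#include <mpi.h>

#include <vector>

// wait_for_sends is a hypothetical helper: wait on outstanding send
// requests, but skip the MPI call entirely when none were posted.
void wait_for_sends(std::vector<MPI_Request> &send_requests)
{
  if (!send_requests.empty())
    {
      const int ierr = MPI_Waitall(static_cast<int>(send_requests.size()),
                                   send_requests.data(),
                                   MPI_STATUSES_IGNORE);
      (void)ierr; // deal.II would call AssertThrowMPI(ierr) here
    }
}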
include/deal.II/lac/affine_constraints.templates.h
index 8df61f8e87fdb4d45839df87917f485ea53ec5dc..d74f42a8813086a523ea1c27deb7f8734c0dcb97 100644 (file)
@@ -490,8 +490,7 @@ AffineConstraints<number>::make_consistent_in_parallel(
   const IndexSet &locally_relevant_dofs,
   const MPI_Comm  mpi_communicator)
 {
-  if (Utilities::MPI::job_supports_mpi() == false ||
-      Utilities::MPI::n_mpi_processes(mpi_communicator) == 1)
+  if (Utilities::MPI::n_mpi_processes(mpi_communicator) == 1)
     return; // nothing to do, since serial
 
   Assert(sorted == false, ExcMatrixIsClosed());
include/deal.II/lac/la_parallel_block_vector.templates.h
index e7476f039697e29957f22faba5b14ced4724f230..fd417fcd05ae7d69b03e29a121bc5c44becbb8d9 100644 (file)
@@ -725,11 +725,7 @@ namespace LinearAlgebra
     {
       Assert(this->n_blocks() > 0, ExcEmptyObject());
 
-      // use int instead of bool. in order to make global reduction operations
-      // work also when MPI_Init was not called, only call MPI_Allreduce
-      // commands when there is more than one processor (note that reinit()
-      // functions handle this case correctly through the job_supports_mpi()
-      // query). this is the same in all the functions below
+      // use int instead of bool
       int local_result = -1;
       for (unsigned int i = 0; i < this->n_blocks(); ++i)
         local_result =
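
The shortened comment above keeps an idiom from the old code: a yes/no answer is stored in an int rather than a bool so it can be combined across ranks with an MPI reduction, and the reduction is only issued when more than one process participates. A small sketch of that idiom, assuming a deal.II build with MPI enabled (the helper name all_ranks_agree is illustrative, not deal.II API):

#include <deal.II/base/exceptions.h>
#include <deal.II/base/mpi.h>

// Combine a local boolean flag into a global "all ranks agree" answer;
// with the guarded n_mpi_processes() from this commit, the function is
// also usable in a run where MPI was never initialized.
bool all_ranks_agree(const bool local_ok, const MPI_Comm comm)
{
  int local  = local_ok ? 1 : 0;
  int global = local;
  if (dealii::Utilities::MPI::n_mpi_processes(comm) > 1)
    {
      // MPI_LAND on int performs a logical "and" over all ranks.
      const int ierr =
        MPI_Allreduce(&local, &global, 1, MPI_INT, MPI_LAND, comm);
      AssertThrowMPI(ierr);
    }
  return global != 0;
}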
include/deal.II/lac/la_parallel_vector.templates.h
index 999f6204ad127ff0ee593efd6a48f927fc5000cc..13e3c5a11b8198a6edd944be0287a67f899be52a 100644 (file)
@@ -1341,34 +1341,36 @@ namespace LinearAlgebra
 #ifdef DEAL_II_WITH_MPI
 
 #  ifdef DEBUG
-      if (Utilities::MPI::job_supports_mpi())
+      Assert(Utilities::MPI::job_supports_mpi() ||
+               (update_ghost_values_requests.empty() &&
+                compress_requests.empty()),
+             ExcInternalError());
+
+      // make sure that there are not outstanding requests from updating
+      // ghost values or compress
+      int flag = 1;
+      if (update_ghost_values_requests.size() > 0)
         {
-          // make sure that there are not outstanding requests from updating
-          // ghost values or compress
-          int flag = 1;
-          if (update_ghost_values_requests.size() > 0)
-            {
-              const int ierr = MPI_Testall(update_ghost_values_requests.size(),
-                                           update_ghost_values_requests.data(),
-                                           &flag,
-                                           MPI_STATUSES_IGNORE);
-              AssertThrowMPI(ierr);
-              Assert(flag == 1,
-                     ExcMessage(
-                       "MPI found unfinished update_ghost_values() requests "
-                       "when calling swap, which is not allowed."));
-            }
-          if (compress_requests.size() > 0)
-            {
-              const int ierr = MPI_Testall(compress_requests.size(),
-                                           compress_requests.data(),
-                                           &flag,
-                                           MPI_STATUSES_IGNORE);
-              AssertThrowMPI(ierr);
-              Assert(flag == 1,
-                     ExcMessage("MPI found unfinished compress() requests "
-                                "when calling swap, which is not allowed."));
-            }
+          const int ierr = MPI_Testall(update_ghost_values_requests.size(),
+                                       update_ghost_values_requests.data(),
+                                       &flag,
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+          Assert(flag == 1,
+                 ExcMessage(
+                   "MPI found unfinished update_ghost_values() requests "
+                   "when calling swap, which is not allowed."));
+        }
+      if (compress_requests.size() > 0)
+        {
+          const int ierr = MPI_Testall(compress_requests.size(),
+                                       compress_requests.data(),
+                                       &flag,
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+          Assert(flag == 1,
+                 ExcMessage("MPI found unfinished compress() requests "
+                            "when calling swap, which is not allowed."));
         }
 #  endif
 
include/deal.II/matrix_free/matrix_free.templates.h
index 1323f58aa0a2f985cad437f4bb47c26929083289..0d6e09c878e143bfde5114dc29115239b320726b 100644 (file)
@@ -441,24 +441,12 @@ MatrixFree<dim, Number, VectorizedArrayType>::internal_reinit(
       task_info.allow_ghosted_vectors_in_loops =
         additional_data.allow_ghosted_vectors_in_loops;
 
-      // set variables that are independent of FE
-      if (Utilities::MPI::job_supports_mpi() == true)
-        {
-          task_info.communicator = dof_handler[0]->get_communicator();
-          task_info.my_pid =
-            Utilities::MPI::this_mpi_process(task_info.communicator);
-          task_info.n_procs =
-            Utilities::MPI::n_mpi_processes(task_info.communicator);
-
-          task_info.communicator_sm = additional_data.communicator_sm;
-        }
-      else
-        {
-          task_info.communicator    = MPI_COMM_SELF;
-          task_info.communicator_sm = MPI_COMM_SELF;
-          task_info.my_pid          = 0;
-          task_info.n_procs         = 1;
-        }
+      task_info.communicator    = dof_handler[0]->get_communicator();
+      task_info.communicator_sm = additional_data.communicator_sm;
+      task_info.my_pid =
+        Utilities::MPI::this_mpi_process(task_info.communicator);
+      task_info.n_procs =
+        Utilities::MPI::n_mpi_processes(task_info.communicator);
 
 #ifdef DEBUG
       for (const auto &constraint : constraints)
source/base/mpi.cc
index dab88e2b97702ea39a96ae9d07f16d1de095633d..76558a606421f7166613c72f063041973fc93cc6 100644 (file)
@@ -149,9 +149,12 @@ namespace Utilities
     unsigned int
     n_mpi_processes(const MPI_Comm mpi_communicator)
     {
-      int       n_jobs = 1;
-      const int ierr   = MPI_Comm_size(mpi_communicator, &n_jobs);
-      AssertThrowMPI(ierr);
+      int n_jobs = 1;
+      if (job_supports_mpi())
+        {
+          const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
+          AssertThrowMPI(ierr);
+        }
 
       return n_jobs;
     }
@@ -160,9 +163,12 @@ namespace Utilities
     unsigned int
     this_mpi_process(const MPI_Comm mpi_communicator)
     {
-      int       rank = 0;
-      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
-      AssertThrowMPI(ierr);
+      int rank = 0;
+      if (job_supports_mpi())
+        {
+          const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
+          AssertThrowMPI(ierr);
+        }
 
       return rank;
     }
@@ -541,8 +547,7 @@ namespace Utilities
     {
       // If MPI was not started, we have a serial computation and cannot run
       // the other MPI commands
-      if (job_supports_mpi() == false ||
-          Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
+      if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
         {
           for (unsigned int i = 0; i < my_values.size(); ++i)
             {
@@ -1184,21 +1189,24 @@ namespace Utilities
 
 #ifdef DEAL_II_WITH_MPI
 
-      // TODO: For now, we implement this mutex with a blocking barrier
-      // in the lock and unlock. It needs to be tested, if we can move
-      // to a nonblocking barrier (code disabled below).
+      if (job_supports_mpi())
+        {
+          // TODO: For now, we implement this mutex with a blocking barrier in
+          // the lock and unlock. It needs to be tested, if we can move to a
+          // nonblocking barrier (code disabled below).
 
-      const int ierr = MPI_Barrier(comm);
-      AssertThrowMPI(ierr);
+          const int ierr = MPI_Barrier(comm);
+          AssertThrowMPI(ierr);
 
 #  if 0
-      // wait for non-blocking barrier to finish. This is a noop the
-      // first time we lock().
-      const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
-      AssertThrowMPI(ierr);
+          // wait for non-blocking barrier to finish. This is a noop the
+          // first time we lock().
+          const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
 #  else
-      // nothing to do as blocking barrier already completed
+          // nothing to do as blocking barrier already completed
 #  endif
+        }
 #endif
 
       locked = true;
@@ -1222,16 +1230,19 @@ namespace Utilities
 
 #ifdef DEAL_II_WITH_MPI
 
-      // TODO: For now, we implement this mutex with a blocking barrier
-      // in the lock and unlock. It needs to be tested, if we can move
-      // to a nonblocking barrier (code disabled below):
+      if (job_supports_mpi())
+        {
+          // TODO: For now, we implement this mutex with a blocking barrier
+          // in the lock and unlock. It needs to be tested, if we can move
+          // to a nonblocking barrier (code disabled below):
 #  if 0
       const int ierr = MPI_Ibarrier(comm, &request);
       AssertThrowMPI(ierr);
 #  else
-      const int ierr = MPI_Barrier(comm);
-      AssertThrowMPI(ierr);
+          const int ierr = MPI_Barrier(comm);
+          AssertThrowMPI(ierr);
 #  endif
+        }
 #endif
 
       locked = false;
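
With the guards added above, the two basic queries can be called even when MPI_Init() was never invoked: the MPI calls are skipped and the serial defaults (one process, rank zero) are returned. A minimal illustration of that behavior, assuming a deal.II build with MPI enabled (not part of the commit):

#include <deal.II/base/mpi.h>

#include <iostream>

int main()
{
  // MPI is deliberately never initialized in this program.
  const unsigned int n_procs =
    dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const unsigned int rank =
    dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

  // Expected from the guarded implementations above: "1 0".
  std::cout << n_procs << ' ' << rank << std::endl;
  return 0;
}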
source/base/mpi_compute_index_owner_internal.cc
index f3a333089cacc7fa213d9a13510bcd1495b38bd3..778e92371b17e947bf753eadfa6f26c805a274bd 100644 (file)
@@ -170,7 +170,6 @@ namespace Utilities
           // 1) set up the partition
           this->partition(owned_indices, comm);
 
-#ifdef DEAL_II_WITH_MPI
           unsigned int my_rank = this_mpi_process(comm);
 
           types::global_dof_index dic_local_received = 0;
@@ -217,14 +216,14 @@ namespace Utilities
 
                   Assert(next_index > index_range.first, ExcInternalError());
 
-#  ifdef DEBUG
+#ifdef DEBUG
                   // make sure that the owner is the same on the current
                   // interval
                   for (types::global_dof_index i = index_range.first + 1;
                        i < next_index;
                        ++i)
                     AssertDimension(owner, dof_to_dict_rank(i));
-#  endif
+#endif
 
                   // add the interval, either to the local range or into a
                   // buffer to be sent to another processor
@@ -245,6 +244,7 @@ namespace Utilities
                 }
             }
 
+#ifdef DEAL_II_WITH_MPI
           n_dict_procs_in_owned_indices = buffers.size();
           std::vector<MPI_Request> request;
 
@@ -421,8 +421,9 @@ namespace Utilities
             }
 
 #else
-          (void)owned_indices;
+          Assert(buffers.size() == 0, ExcInternalError());
           (void)comm;
+          (void)dic_local_received;
 #endif
         }
 
@@ -432,7 +433,6 @@ namespace Utilities
         Dictionary::partition(const IndexSet &owned_indices,
                               const MPI_Comm  comm)
         {
-#ifdef DEAL_II_WITH_MPI
           const unsigned int n_procs = n_mpi_processes(comm);
           const unsigned int my_rank = this_mpi_process(comm);
 
@@ -452,10 +452,6 @@ namespace Utilities
           local_range.second = get_index_offset(my_rank + 1);
 
           locally_owned_size = local_range.second - local_range.first;
-#else
-          (void)owned_indices;
-          (void)comm;
-#endif
         }
 
 
source/base/partitioner.cc
index 4c0177ee40f31e67c60f80440d0d34dcb4e51068..6c23bba98a00717cc3262fd53506ca7918282024 100644 (file)
@@ -150,16 +150,8 @@ namespace Utilities
     void
     Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
     {
-      if (Utilities::MPI::job_supports_mpi() == true)
-        {
-          my_pid  = Utilities::MPI::this_mpi_process(communicator);
-          n_procs = Utilities::MPI::n_mpi_processes(communicator);
-        }
-      else
-        {
-          my_pid  = 0;
-          n_procs = 1;
-        }
+      my_pid  = Utilities::MPI::this_mpi_process(communicator);
+      n_procs = Utilities::MPI::n_mpi_processes(communicator);
 
       // set the local range
       Assert(locally_owned_indices.is_contiguous() == true,
source/dofs/number_cache.cc
index b5085c4a0bafa27c1357a374b8acc67bd8574e01..e7b4e233ca17906afec0ecb71def506f1de6617d 100644 (file)
@@ -91,9 +91,7 @@ namespace internal
       else if (n_locally_owned_dofs_per_processor.empty() == false)
         {
           AssertDimension(n_locally_owned_dofs_per_processor.size(),
-                          (Utilities::MPI::job_supports_mpi() ?
-                             Utilities::MPI::n_mpi_processes(mpi_communicator) :
-                             1));
+                          Utilities::MPI::n_mpi_processes(mpi_communicator));
           return n_locally_owned_dofs_per_processor;
         }
       else
@@ -115,9 +113,7 @@ namespace internal
       else if (locally_owned_dofs_per_processor.empty() == false)
         {
           AssertDimension(locally_owned_dofs_per_processor.size(),
-                          (Utilities::MPI::job_supports_mpi() ?
-                             Utilities::MPI::n_mpi_processes(mpi_communicator) :
-                             1));
+                          Utilities::MPI::n_mpi_processes(mpi_communicator));
           return locally_owned_dofs_per_processor;
         }
       else
