https://gitweb.dealii.org/ - dealii.git/commitdiff
Remove DEAL_II_MPI_VERSION_GTE 13465/head
author Pengfei Jia <pengfej@clemson.edu>
Mon, 28 Feb 2022 21:53:37 +0000 (16:53 -0500)
committer Pengfei Jia <pengfej@clemson.edu>
Wed, 2 Mar 2022 15:00:53 +0000 (10:00 -0500)
An update on Issue #13447: Remove MPI 2.X logic

Fix some mistakes

Indent

Add back some comments

Recover lines 616-629

Add back lines 447-508

Change back step-76

include/deal.II/base/aligned_vector.h
include/deal.II/base/mpi.h
include/deal.II/base/mpi_consensus_algorithms.templates.h
include/deal.II/lac/la_parallel_vector.templates.h
source/base/mpi.cc
tests/mpi/create_mpi_datatype_01.cc
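
For context, DEAL_II_MPI_VERSION_GTE is the configure-time version gate that this commit removes from the files above. A sketch of how such a gate is typically defined (the exact form in deal.II's generated config header may differ slightly):

    // Sketch only: compare the detected MPI standard version against a
    // required (major, minor) pair at preprocessing time; deal.II exposes
    // the detected version as DEAL_II_MPI_VERSION_MAJOR / _MINOR.
    #define DEAL_II_MPI_VERSION_GTE(major, minor)          \
      ((DEAL_II_MPI_VERSION_MAJOR * 100 +                  \
        DEAL_II_MPI_VERSION_MINOR) >= (major)*100 + (minor))

With the minimum requirement raised to MPI 3.0, every "#if DEAL_II_MPI_VERSION_GTE(3, 0)" guard is always true and the corresponding "#else" branches are dead code, which is what the hunks below delete.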

diff --git a/include/deal.II/base/aligned_vector.h b/include/deal.II/base/aligned_vector.h
index 51f0f02b3e46d84002ce3d0ccd237c7719b0f403..0f6bb209839577bd62e6fc67690fb5c48d7d5abc 100644
@@ -1502,7 +1502,6 @@ AlignedVector<T>::replicate_across_communicator(const MPI_Comm &   communicator,
                                                 const unsigned int root_process)
 {
 #  ifdef DEAL_II_WITH_MPI
-#    if DEAL_II_MPI_VERSION_GTE(3, 0)
 
   // **** Step 0 ****
   // All but the root process no longer need their data, so release the memory
@@ -1873,20 +1872,13 @@ AlignedVector<T>::replicate_across_communicator(const MPI_Comm &   communicator,
   // **** Consistency check ****
   // At this point, each process should have a copy of the data.
   // Verify this in some sort of round-about way
-#      ifdef DEBUG
+#    ifdef DEBUG
   const std::vector<char> packed_data = Utilities::pack(*this);
   const int               hash =
     std::accumulate(packed_data.begin(), packed_data.end(), int(0));
   Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
-#      endif
-
-
-
-#    else
-  // If we only have MPI 2.x, then simply broadcast the current object to all
-  // other processes and forego the idea of using shmem
-  *this = Utilities::MPI::broadcast(communicator, *this, root_process);
 #    endif
+
 #  else
   // No MPI -> nothing to replicate
   (void)communicator;
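
With the MPI 2.x broadcast fallback gone, replicate_across_communicator() always takes the shared-memory path. A hypothetical usage sketch (not part of the patch):

    #include <deal.II/base/aligned_vector.h>
    #include <deal.II/base/mpi.h>

    void replicate_table(const MPI_Comm &comm)
    {
      dealii::AlignedVector<double> table;
      const unsigned int            root = 0;

      // Only the root process fills the data ...
      if (dealii::Utilities::MPI::this_mpi_process(comm) == root)
        table.resize(1000, 3.14);

      // ... afterwards every process can read it; ranks on the same node
      // share a single copy through an MPI-3 shared-memory window.
      table.replicate_across_communicator(comm, root);
    }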
diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index 2a42571c7cd44924d6b197cb88db873eb8176043..d75228fcc31165143866cf1c78c5c16596588639 100644
@@ -80,19 +80,7 @@ using MPI_Op       = int;
  * 4. const_cast the given expression @p expr to this new type.
  */
 #ifdef DEAL_II_WITH_MPI
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
-
-#    define DEAL_II_MPI_CONST_CAST(expr) (expr)
-
-#  else
-
-#    include <type_traits>
-
-#    define DEAL_II_MPI_CONST_CAST(expr)     \
-      const_cast<typename std::remove_const< \
-        typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
-
-#  endif
+#  define DEAL_II_MPI_CONST_CAST(expr) (expr)
 #endif
 
 
@@ -1336,11 +1324,7 @@ namespace Utilities
     inline MPI_Datatype
     mpi_type_id(const bool *)
     {
-#    if DEAL_II_MPI_VERSION_GTE(2, 2)
       return MPI_CXX_BOOL;
-#    else
-      return MPI_C_BOOL;
-#    endif
     }
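
The macro can now expand to the expression unchanged because MPI 3.0 declares send buffers as const void *; under MPI 2.x they were plain void *, which is what the removed const_cast branch worked around. An illustrative sketch (not from the patch):

    const std::vector<double> data(10, 1.0);

    // Fine with MPI >= 3.0: the pointer-to-const binds to 'const void *'.
    // Under MPI 2.x the macro had to const_cast it to 'double *' first.
    MPI_Send(DEAL_II_MPI_CONST_CAST(data.data()),
             static_cast<int>(data.size()),
             MPI_DOUBLE,
             /*dest*/ 1,
             /*tag*/ 0,
             MPI_COMM_WORLD);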
 
 
diff --git a/include/deal.II/base/mpi_consensus_algorithms.templates.h b/include/deal.II/base/mpi_consensus_algorithms.templates.h
index e56f8bb19ba1eb5956419b50702ec774a4e8c87b..acd4e44d4a383fcbd739ceb2276427ae89b5fa32 100644
@@ -466,15 +466,8 @@ namespace Utilities
       NBX<T1, T2>::signal_finish(const MPI_Comm &comm)
       {
 #ifdef DEAL_II_WITH_MPI
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
         const auto ierr = MPI_Ibarrier(comm, &barrier_request);
         AssertThrowMPI(ierr);
-#  else
-        AssertThrow(false,
-                    ExcMessage(
-                      "ConsensusAlgorithms::NBX uses MPI 3.0 features. "
-                      "You should compile with at least MPI 3.0."));
-#  endif
 #else
         (void)comm;
 #endif
@@ -895,15 +888,13 @@ namespace Utilities
                                         Utilities::MPI::n_mpi_processes(comm) :
                                         1);
 #ifdef DEAL_II_WITH_MPI
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
-#    ifdef DEBUG
+#  ifdef DEBUG
         if (n_procs > 10)
-#    else
+#  else
         if (n_procs > 99)
-#    endif
+#  endif
           consensus_algo.reset(new NBX<T1, T2>());
         else
-#  endif
 #endif
           if (n_procs > 1)
           consensus_algo.reset(new PEX<T1, T2>());
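
NBX can now rely unconditionally on MPI_Ibarrier. A sketch of the termination pattern behind signal_finish() (illustrative only, not the class itself): once a rank has answered all of its own requests it enters a non-blocking barrier and keeps servicing incoming messages until every rank has entered it.

    MPI_Request barrier_request;
    int         ierr = MPI_Ibarrier(comm, &barrier_request); // MPI 3.0 feature
    AssertThrowMPI(ierr);

    int everyone_finished = 0;
    while (everyone_finished == 0)
      {
        // ... probe for and answer any messages that are still arriving ...
        ierr =
          MPI_Test(&barrier_request, &everyone_finished, MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);
      }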
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index aae9f31c1774efbf1e82b5641b20666c10e95af2..06a79961fe3e4eaca8c3acc3c5c9fd1b1f6050b3 100644
@@ -149,7 +149,6 @@ namespace LinearAlgebra
           else
             {
 #ifdef DEAL_II_WITH_MPI
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
               allocated_size = new_alloc_size;
 
               const unsigned int size_sm =
@@ -237,11 +236,6 @@ namespace LinearAlgebra
                                const auto ierr = MPI_Win_free(&mpi_window);
                                AssertThrowMPI(ierr);
                              }};
-#  else
-              AssertThrow(false,
-                          ExcMessage(
-                            "Sorry, this feature requires MPI 3.0 support"));
-#  endif
 #else
               Assert(false, ExcInternalError());
 #endif
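
The vector's shared-memory storage now requires MPI-3 windows unconditionally. A minimal sketch of the underlying calls (variable names are placeholders, not the class's members; 'comm_sm' is assumed to be a communicator of ranks on one node):

    MPI_Win  mpi_window;
    double  *my_segment = nullptr;

    // Allocate this rank's part of a node-local shared-memory window;
    // 'bytes_on_this_rank' is an MPI_Aint byte count assumed to be set.
    int ierr = MPI_Win_allocate_shared(bytes_on_this_rank,
                                       sizeof(double),
                                       MPI_INFO_NULL,
                                       comm_sm,
                                       &my_segment,
                                       &mpi_window);
    AssertThrowMPI(ierr);

    // ... other ranks' segments can be located with MPI_Win_shared_query ...

    ierr = MPI_Win_free(&mpi_window);
    AssertThrowMPI(ierr);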
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index 481102eeb642e68513912c5ee7d2852d332f6564..b15e272cee0c925206e30cfab94b4b80eb018b2c 100644
@@ -183,76 +183,7 @@ namespace Utilities
                  const int        tag,
                  MPI_Comm *       new_comm)
     {
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
       return MPI_Comm_create_group(comm, group, tag, new_comm);
-#  else
-      int rank;
-      int ierr = MPI_Comm_rank(comm, &rank);
-      AssertThrowMPI(ierr);
-
-      int grp_rank;
-      ierr = MPI_Group_rank(group, &grp_rank);
-      AssertThrowMPI(ierr);
-      if (grp_rank == MPI_UNDEFINED)
-        {
-          *new_comm = MPI_COMM_NULL;
-          return MPI_SUCCESS;
-        }
-
-      int grp_size;
-      ierr = MPI_Group_size(group, &grp_size);
-      AssertThrowMPI(ierr);
-
-      ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
-      AssertThrowMPI(ierr);
-
-      MPI_Group parent_grp;
-      ierr = MPI_Comm_group(comm, &parent_grp);
-      AssertThrowMPI(ierr);
-
-      std::vector<int> pids(grp_size);
-      std::vector<int> grp_pids(grp_size);
-      std::iota(grp_pids.begin(), grp_pids.end(), 0);
-      ierr = MPI_Group_translate_ranks(
-        group, grp_size, grp_pids.data(), parent_grp, pids.data());
-      AssertThrowMPI(ierr);
-      ierr = MPI_Group_free(&parent_grp);
-      AssertThrowMPI(ierr);
-
-      MPI_Comm comm_old = *new_comm;
-      MPI_Comm ic;
-      for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
-        {
-          const int gid = grp_rank / merge_sz;
-          comm_old      = *new_comm;
-          if (gid % 2 == 0)
-            {
-              if ((gid + 1) * merge_sz < grp_size)
-                {
-                  ierr = (MPI_Intercomm_create(
-                    *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
-                  AssertThrowMPI(ierr);
-                  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
-                  AssertThrowMPI(ierr);
-                }
-            }
-          else
-            {
-              ierr = MPI_Intercomm_create(
-                *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
-              AssertThrowMPI(ierr);
-              ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
-              AssertThrowMPI(ierr);
-            }
-          if (*new_comm != comm_old)
-            {
-              Utilities::MPI::free_communicator(ic);
-              Utilities::MPI::free_communicator(comm_old);
-            }
-        }
-
-      return MPI_SUCCESS;
-#  endif
     }
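
The hand-rolled intercommunicator merge above is replaced by the MPI 3.0 call MPI_Comm_create_group. A minimal usage sketch (illustrative): only the ranks contained in the group participate in the call, in contrast to MPI_Comm_create, which is collective over the whole parent communicator.

    MPI_Group world_group, sub_group;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    const int members[] = {0, 1};                 // example subset of ranks
    MPI_Group_incl(world_group, 2, members, &sub_group);

    // Called only by ranks 0 and 1; all other ranks skip this code path.
    MPI_Comm sub_comm = MPI_COMM_NULL;
    MPI_Comm_create_group(MPI_COMM_WORLD, sub_group, /*tag*/ 0, &sub_comm);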
 
 
@@ -353,14 +284,12 @@ namespace Utilities
       AssertThrowMPI(ierr);
 
 #  ifdef DEBUG
-#    if DEAL_II_MPI_VERSION_GTE(3, 0)
       MPI_Count size64;
       // this function is only available starting with MPI 3.0:
       ierr = MPI_Type_size_x(result, &size64);
       AssertThrowMPI(ierr);
 
       Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
-#    endif
 #  endif
 
       // Now put the new data type into a std::unique_ptr with a custom
@@ -405,7 +334,6 @@ namespace Utilities
           AssertIndexRange(destination, n_procs);
         }
 
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
 
       // Have a little function that checks if destinations provided
       // to the current process are unique. The way it does this is
@@ -435,16 +363,11 @@ namespace Utilities
           return ConsensusAlgorithms::NBX<char, char>().run(
             destinations, {}, {}, {}, mpi_comm);
         }
-        // If that was not the case, we need to use the remainder of the code
-        // below, i.e., just fall through the if condition above.
-#  endif
-
 
-        // So we need to run a different algorithm, specifically one that
-        // requires more memory -- MPI_Reduce_scatter_block will require memory
-        // proportional to the number of processes involved; that function is
-        // also only available for MPI 2.2 or later:
-#  if DEAL_II_MPI_VERSION_GTE(2, 2)
+      // So we need to run a different algorithm, specifically one that
+      // requires more memory -- MPI_Reduce_scatter_block will require memory
+      // proportional to the number of processes involved; that function is
+      // also only available for MPI 2.2 or later:
       static CollectiveMutex      mutex;
       CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 
@@ -506,59 +429,6 @@ namespace Utilities
         }
 
       return origins;
-
-#  else
-
-      // If we don't have MPI_Reduce_scatter_block available, fall back to
-      // a different algorithm that requires even more memory: the number
-      // of processes times the max over the number of destinations they
-      // each have:
-
-      // Start by letting all processors communicate the maximal number of
-      // destinations they have:
-      const unsigned int max_n_destinations =
-        Utilities::MPI::max(destinations.size(), mpi_comm);
-
-      if (max_n_destinations == 0)
-        // all processes have nothing to send/receive:
-        return std::vector<unsigned int>();
-
-      // now that we know the number of data packets every processor wants to
-      // send, set up a buffer with the maximal size and copy our destinations
-      // in there, padded with -1's
-      std::vector<unsigned int> my_destinations(max_n_destinations,
-                                                numbers::invalid_unsigned_int);
-      std::copy(destinations.begin(),
-                destinations.end(),
-                my_destinations.begin());
-
-      // now exchange these (we could communicate less data if we used
-      // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
-      // processors in this case, which is more expensive than the reduction
-      // operation above in MPI_Allreduce)
-      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
-      const int                 ierr = MPI_Allgather(my_destinations.data(),
-                                     max_n_destinations,
-                                     MPI_UNSIGNED,
-                                     all_destinations.data(),
-                                     max_n_destinations,
-                                     MPI_UNSIGNED,
-                                     mpi_comm);
-      AssertThrowMPI(ierr);
-
-      // now we know who is going to communicate with whom. collect who is
-      // going to communicate with us!
-      std::vector<unsigned int> origins;
-      for (unsigned int i = 0; i < n_procs; ++i)
-        for (unsigned int j = 0; j < max_n_destinations; ++j)
-          if (all_destinations[i * max_n_destinations + j] == myid)
-            origins.push_back(i);
-          else if (all_destinations[i * max_n_destinations + j] ==
-                   numbers::invalid_unsigned_int)
-            break;
-
-      return origins;
-#  endif
     }
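
The retained algorithm referred to in the comment earlier in this function works roughly as follows (a sketch of the idea, not necessarily the literal implementation): every rank counts how many messages it wants to send to each other rank, MPI_Reduce_scatter_block sums these vectors and hands each rank its own entry, i.e. the number of incoming messages, and the actual origins are then learned from that many receives with MPI_ANY_SOURCE.

    // Sketch, assuming 'destinations', 'n_procs', and 'mpi_comm' as in the
    // surrounding function:
    std::vector<unsigned int> dest_vector(n_procs, 0);
    for (const unsigned int d : destinations)
      ++dest_vector[d];

    unsigned int n_recv_from = 0;
    const int    ierr        = MPI_Reduce_scatter_block(dest_vector.data(),
                                                        &n_recv_from,
                                                        1,
                                                        MPI_UNSIGNED,
                                                        MPI_SUM,
                                                        mpi_comm);
    AssertThrowMPI(ierr);
    // ... followed by n_recv_from receives with MPI_ANY_SOURCE to collect
    // the actual origin ranks.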
 
 
@@ -581,10 +451,8 @@ namespace Utilities
       }();
 
       // If all processes report that they have unique destinations,
-      // then we can short-cut the process using a consensus algorithm (which
-      // is implemented only for the case of unique destinations, and also only
-      // for MPI 3 and later):
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
+      // then we can short-cut the process using a consensus algorithm:
+
       if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
           1)
         {
@@ -593,7 +461,6 @@ namespace Utilities
             .size();
         }
       else
-#  endif
         {
           const unsigned int n_procs =
             Utilities::MPI::n_mpi_processes(mpi_comm);
@@ -612,7 +479,6 @@ namespace Utilities
           for (const auto &el : destinations)
             ++dest_vector[el];
 
-#  if DEAL_II_MPI_VERSION_GTE(2, 2)
           // Find out how many processes will send to this one
           // MPI_Reduce_scatter(_block) does exactly this
           unsigned int n_recv_from = 0;
@@ -627,33 +493,6 @@ namespace Utilities
           AssertThrowMPI(ierr);
 
           return n_recv_from;
-#  else
-        // Find out how many processes will send to this one
-        // by reducing with sum and then scattering the
-        // results over all processes
-        std::vector<unsigned int> buffer(dest_vector.size());
-        unsigned int              n_recv_from = 0;
-
-        int ierr = MPI_Reduce(dest_vector.data(),
-                              buffer.data(),
-                              dest_vector.size(),
-                              MPI_UNSIGNED,
-                              MPI_SUM,
-                              0,
-                              mpi_comm);
-        AssertThrowMPI(ierr);
-        ierr = MPI_Scatter(buffer.data(),
-                           1,
-                           MPI_UNSIGNED,
-                           &n_recv_from,
-                           1,
-                           MPI_UNSIGNED,
-                           0,
-                           mpi_comm);
-        AssertThrowMPI(ierr);
-
-        return n_recv_from;
-#  endif
         }
     }
 
@@ -1289,7 +1128,7 @@ namespace Utilities
       const int ierr = MPI_Barrier(comm);
       AssertThrowMPI(ierr);
 
-#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
+#  if 0
       // wait for non-blocking barrier to finish. This is a noop the
       // first time we lock().
       const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
@@ -1319,8 +1158,7 @@ namespace Utilities
       // TODO: For now, we implement this mutex with a blocking barrier
       // in the lock and unlock. It needs to be tested, if we can move
       // to a nonblocking barrier (code disabled below):
-
-#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
+#  if 0
       const int ierr = MPI_Ibarrier(comm, &request);
       AssertThrowMPI(ierr);
 #  else
diff --git a/tests/mpi/create_mpi_datatype_01.cc b/tests/mpi/create_mpi_datatype_01.cc
index 40db05eee51fc4a5228447647cda16780efb4232..bb8a03fc415b2f488801cba889bfea08c76bea1a 100644
@@ -43,13 +43,12 @@ test_data_type(const std::uint64_t n_bytes)
   else
     deallog << " size32=" << size32;
 
-#if DEAL_II_MPI_VERSION_GTE(3, 0)
   MPI_Count size64;
   ierr = MPI_Type_size_x(*bigtype, &size64);
   AssertThrowMPI(ierr);
 
   deallog << " size64=" << size64;
-#endif
+
 
   deallog << std::endl;
 }
