From 1dffa16948c4caf0c396d31dc67ec548dfa15195 Mon Sep 17 00:00:00 2001
From: Pengfei Jia
Date: Mon, 28 Feb 2022 16:53:37 -0500
Subject: [PATCH] Remove DEAL_II_MPI_VERSION_GTE

An update on Issue #13447:
- Remove the MPI 2.x logic
- Fix some mistakes
- Fix indentation
- Add back some comments
- Recover lines 616-629
- Add back lines 447-508
- Change step-76 back
---
 include/deal.II/base/aligned_vector.h         |  12 +-
 include/deal.II/base/mpi.h                    |  18 +-
 .../base/mpi_consensus_algorithms.templates.h |  15 +-
 .../lac/la_parallel_vector.templates.h        |   6 -
 source/base/mpi.cc                            | 178 +-----------------
 tests/mpi/create_mpi_datatype_01.cc           |   3 +-
 6 files changed, 15 insertions(+), 217 deletions(-)
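
Note (cover-letter text only, ignored by `git am`): with the MPI 2.x
fallbacks removed, deal.II effectively assumes an MPI 3.0 implementation
wherever it is built with MPI at all. As a purely illustrative sketch --
not something this patch adds -- such a requirement could also be asserted
at compile time via the standard MPI_VERSION macro from <mpi.h>:

    #include <mpi.h>

    // Fail the build early if the MPI library predates the 3.0 standard,
    // rather than failing later inside MPI_Ibarrier() and friends.
    #if MPI_VERSION < 3
    #  error "An MPI 3.0 (or newer) implementation is required."
    #endif
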
" - "You should compile with at least MPI 3.0.")); -# endif #else (void)comm; #endif @@ -895,15 +888,13 @@ namespace Utilities Utilities::MPI::n_mpi_processes(comm) : 1); #ifdef DEAL_II_WITH_MPI -# if DEAL_II_MPI_VERSION_GTE(3, 0) -# ifdef DEBUG +# ifdef DEBUG if (n_procs > 10) -# else +# else if (n_procs > 99) -# endif +# endif consensus_algo.reset(new NBX()); else -# endif #endif if (n_procs > 1) consensus_algo.reset(new PEX()); diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index aae9f31c17..06a79961fe 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -149,7 +149,6 @@ namespace LinearAlgebra else { #ifdef DEAL_II_WITH_MPI -# if DEAL_II_MPI_VERSION_GTE(3, 0) allocated_size = new_alloc_size; const unsigned int size_sm = @@ -237,11 +236,6 @@ namespace LinearAlgebra const auto ierr = MPI_Win_free(&mpi_window); AssertThrowMPI(ierr); }}; -# else - AssertThrow(false, - ExcMessage( - "Sorry, this feature requires MPI 3.0 support")); -# endif #else Assert(false, ExcInternalError()); #endif diff --git a/source/base/mpi.cc b/source/base/mpi.cc index 481102eeb6..b15e272cee 100644 --- a/source/base/mpi.cc +++ b/source/base/mpi.cc @@ -183,76 +183,7 @@ namespace Utilities const int tag, MPI_Comm * new_comm) { -# if DEAL_II_MPI_VERSION_GTE(3, 0) return MPI_Comm_create_group(comm, group, tag, new_comm); -# else - int rank; - int ierr = MPI_Comm_rank(comm, &rank); - AssertThrowMPI(ierr); - - int grp_rank; - ierr = MPI_Group_rank(group, &grp_rank); - AssertThrowMPI(ierr); - if (grp_rank == MPI_UNDEFINED) - { - *new_comm = MPI_COMM_NULL; - return MPI_SUCCESS; - } - - int grp_size; - ierr = MPI_Group_size(group, &grp_size); - AssertThrowMPI(ierr); - - ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm); - AssertThrowMPI(ierr); - - MPI_Group parent_grp; - ierr = MPI_Comm_group(comm, &parent_grp); - AssertThrowMPI(ierr); - - std::vector pids(grp_size); - std::vector grp_pids(grp_size); - std::iota(grp_pids.begin(), grp_pids.end(), 0); - ierr = MPI_Group_translate_ranks( - group, grp_size, grp_pids.data(), parent_grp, pids.data()); - AssertThrowMPI(ierr); - ierr = MPI_Group_free(&parent_grp); - AssertThrowMPI(ierr); - - MPI_Comm comm_old = *new_comm; - MPI_Comm ic; - for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2) - { - const int gid = grp_rank / merge_sz; - comm_old = *new_comm; - if (gid % 2 == 0) - { - if ((gid + 1) * merge_sz < grp_size) - { - ierr = (MPI_Intercomm_create( - *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic)); - AssertThrowMPI(ierr); - ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm); - AssertThrowMPI(ierr); - } - } - else - { - ierr = MPI_Intercomm_create( - *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic); - AssertThrowMPI(ierr); - ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm); - AssertThrowMPI(ierr); - } - if (*new_comm != comm_old) - { - Utilities::MPI::free_communicator(ic); - Utilities::MPI::free_communicator(comm_old); - } - } - - return MPI_SUCCESS; -# endif } @@ -353,14 +284,12 @@ namespace Utilities AssertThrowMPI(ierr); # ifdef DEBUG -# if DEAL_II_MPI_VERSION_GTE(3, 0) MPI_Count size64; // this function is only available starting with MPI 3.0: ierr = MPI_Type_size_x(result, &size64); AssertThrowMPI(ierr); Assert(size64 == static_cast(n_bytes), ExcInternalError()); -# endif # endif // Now put the new data type into a std::unique_ptr with a custom @@ -405,7 +334,6 @@ namespace Utilities 
           AssertIndexRange(destination, n_procs);
         }
 
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
 
       // Have a little function that checks if destinations provided
      // to the current process are unique. The way it does this is
@@ -435,16 +363,11 @@ namespace Utilities
           return ConsensusAlgorithms::NBX<char, char>().run(
             destinations, {}, {}, {}, mpi_comm);
         }
-      // If that was not the case, we need to use the remainder of the code
-      // below, i.e., just fall through the if condition above.
-#  endif
-
-
-      // So we need to run a different algorithm, specifically one that
-      // requires more memory -- MPI_Reduce_scatter_block will require memory
-      // proportional to the number of processes involved; that function is
-      // also only available for MPI 2.2 or later:
-#  if DEAL_II_MPI_VERSION_GTE(2, 2)
+      // So we need to run a different algorithm, specifically one that
+      // requires more memory -- MPI_Reduce_scatter_block will require memory
+      // proportional to the number of processes involved; that function is
+      // also only available for MPI 2.2 or later:
       static CollectiveMutex      mutex;
       CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 
@@ -506,59 +429,6 @@ namespace Utilities
         }
 
       return origins;
-
-#  else
-
-      // If we don't have MPI_Reduce_scatter_block available, fall back to
-      // a different algorithm that requires even more memory: the number
-      // of processes times the max over the number of destinations they
-      // each have:
-
-      // Start by letting all processors communicate the maximal number of
-      // destinations they have:
-      const unsigned int max_n_destinations =
-        Utilities::MPI::max(destinations.size(), mpi_comm);
-
-      if (max_n_destinations == 0)
-        // all processes have nothing to send/receive:
-        return std::vector<unsigned int>();
-
-      // now that we know the number of data packets every processor wants to
-      // send, set up a buffer with the maximal size and copy our destinations
-      // in there, padded with -1's
-      std::vector<unsigned int> my_destinations(max_n_destinations,
-                                                numbers::invalid_unsigned_int);
-      std::copy(destinations.begin(),
-                destinations.end(),
-                my_destinations.begin());
-
-      // now exchange these (we could communicate less data if we used
-      // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
-      // processors in this case, which is more expensive than the reduction
-      // operation above in MPI_Allreduce)
-      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
-      const int ierr = MPI_Allgather(my_destinations.data(),
-                                     max_n_destinations,
-                                     MPI_UNSIGNED,
-                                     all_destinations.data(),
-                                     max_n_destinations,
-                                     MPI_UNSIGNED,
-                                     mpi_comm);
-      AssertThrowMPI(ierr);
-
-      // now we know who is going to communicate with whom. collect who is
-      // going to communicate with us!
-      std::vector<unsigned int> origins;
-      for (unsigned int i = 0; i < n_procs; ++i)
-        for (unsigned int j = 0; j < max_n_destinations; ++j)
-          if (all_destinations[i * max_n_destinations + j] == myid)
-            origins.push_back(i);
-          else if (all_destinations[i * max_n_destinations + j] ==
-                   numbers::invalid_unsigned_int)
-            break;
-
-      return origins;
-#  endif
     }
 
 
@@ -581,10 +451,8 @@ namespace Utilities
       }();
 
       // If all processes report that they have unique destinations,
-      // then we can short-cut the process using a consensus algorithm (which
-      // is implemented only for the case of unique destinations, and also only
-      // for MPI 3 and later):
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
+      // then we can short-cut the process using a consensus algorithm:
+
       if (Utilities::MPI::min((my_destinations_are_unique ?
                                 1 : 0), mpi_comm) == 1)
         {
@@ -593,7 +461,6 @@ namespace Utilities
             .size();
         }
       else
-#  endif
         {
           const unsigned int n_procs =
             Utilities::MPI::n_mpi_processes(mpi_comm);
@@ -612,7 +479,6 @@ namespace Utilities
           for (const auto &el : destinations)
             ++dest_vector[el];
 
-#  if DEAL_II_MPI_VERSION_GTE(2, 2)
           // Find out how many processes will send to this one
           // MPI_Reduce_scatter(_block) does exactly this
           unsigned int n_recv_from = 0;
@@ -627,33 +493,6 @@ namespace Utilities
           AssertThrowMPI(ierr);
 
           return n_recv_from;
-#  else
-          // Find out how many processes will send to this one
-          // by reducing with sum and then scattering the
-          // results over all processes
-          std::vector<unsigned int> buffer(dest_vector.size());
-          unsigned int              n_recv_from = 0;
-
-          int ierr = MPI_Reduce(dest_vector.data(),
-                                buffer.data(),
-                                dest_vector.size(),
-                                MPI_UNSIGNED,
-                                MPI_SUM,
-                                0,
-                                mpi_comm);
-          AssertThrowMPI(ierr);
-          ierr = MPI_Scatter(buffer.data(),
-                             1,
-                             MPI_UNSIGNED,
-                             &n_recv_from,
-                             1,
-                             MPI_UNSIGNED,
-                             0,
-                             mpi_comm);
-          AssertThrowMPI(ierr);
-
-          return n_recv_from;
-#  endif
         }
     }
 
 
@@ -1289,7 +1128,7 @@ namespace Utilities
       const int ierr = MPI_Barrier(comm);
       AssertThrowMPI(ierr);
 
-#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
+#  if 0
       // wait for non-blocking barrier to finish. This is a noop the
       // first time we lock().
      const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
@@ -1319,8 +1158,7 @@ namespace Utilities
       // TODO: For now, we implement this mutex with a blocking barrier
       // in the lock and unlock. It needs to be tested, if we can move
       // to a nonblocking barrier (code disabled below):
-
-#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
+#  if 0
       const int ierr = MPI_Ibarrier(comm, &request);
       AssertThrowMPI(ierr);
 #  else
diff --git a/tests/mpi/create_mpi_datatype_01.cc b/tests/mpi/create_mpi_datatype_01.cc
index 40db05eee5..bb8a03fc41 100644
--- a/tests/mpi/create_mpi_datatype_01.cc
+++ b/tests/mpi/create_mpi_datatype_01.cc
@@ -43,13 +43,12 @@ test_data_type(const std::uint64_t n_bytes)
   else
     deallog << " size32=" << size32;
 
-#if DEAL_II_MPI_VERSION_GTE(3, 0)
   MPI_Count size64;
   ierr = MPI_Type_size_x(*bigtype, &size64);
   AssertThrowMPI(ierr);
 
   deallog << " size64=" << size64;
-#endif
+
   deallog << std::endl;
 }
-- 
2.39.5
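
Postscript for reviewers (below the patch, ignored by git am): the NBX
consensus algorithm that this patch makes unconditional relies on
MPI_Ibarrier, the non-blocking barrier introduced in MPI 3.0. The sketch
below only illustrates that pattern (the function name and structure are
invented for this note, not taken from the deal.II sources): each rank
enters the barrier once its own sends are finished and keeps polling for
incoming messages until the barrier completes everywhere.

    #include <mpi.h>

    void nbx_style_completion(MPI_Comm comm)
    {
      MPI_Request barrier_request;
      int ierr = MPI_Ibarrier(comm, &barrier_request); // MPI >= 3.0
      (void)ierr; // error handling elided in this sketch

      int everyone_done = 0;
      while (everyone_done == 0)
        {
          // The real algorithm would probe for and answer incoming
          // requests here (MPI_Iprobe + MPI_Recv).
          MPI_Test(&barrier_request, &everyone_done, MPI_STATUS_IGNORE);
        }
    }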
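
Similarly, AlignedVector::replicate_across_communicator() and the
LinearAlgebra vector changes rely on MPI 3.0 shared-memory windows. A
minimal, hypothetical sketch of that mechanism follows (names invented for
this note; the actual deal.II code adds alignment handling and custom
deleters on top):

    #include <mpi.h>

    void shared_memory_sketch(MPI_Comm comm, const MPI_Aint n_bytes)
    {
      // Split the communicator into per-node islands whose ranks can
      // actually share memory.
      MPI_Comm shmem_comm;
      MPI_Comm_split_type(
        comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &shmem_comm);

      int rank_on_node;
      MPI_Comm_rank(shmem_comm, &rank_on_node);

      // Only rank 0 of each node contributes memory; the others attach.
      char *  base_ptr;
      MPI_Win shmem_window;
      MPI_Win_allocate_shared((rank_on_node == 0) ? n_bytes : 0,
                              /* disp_unit = */ 1,
                              MPI_INFO_NULL,
                              shmem_comm,
                              &base_ptr,
                              &shmem_window);

      // Every rank asks where rank 0's segment lives in its own
      // address space.
      MPI_Aint size;
      int      disp_unit;
      MPI_Win_shared_query(shmem_window, 0, &size, &disp_unit, &base_ptr);

      // ... use base_ptr[0 .. size-1] collectively, then clean up:
      MPI_Win_free(&shmem_window);
      MPI_Comm_free(&shmem_comm);
    }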
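
Finally, the retained code path in compute_n_point_to_point_communications()
uses MPI_Reduce_scatter_block (an MPI 2.2 function, hence the comment kept
in the patch). The idea, condensed from the code above into a free-standing
sketch (the function name is invented for this note):

    #include <mpi.h>

    #include <vector>

    // dest_vector has one slot per rank, holding how many messages this
    // rank wants to send there. Summing the vectors element-wise across
    // all ranks and handing entry i back to rank i tells each rank how
    // many incoming messages to expect.
    unsigned int count_senders(const std::vector<unsigned int> &dest_vector,
                               MPI_Comm                         comm)
    {
      unsigned int n_recv_from = 0;
      MPI_Reduce_scatter_block(dest_vector.data(),
                               &n_recv_from,
                               1, // one result entry per rank
                               MPI_UNSIGNED,
                               MPI_SUM,
                               comm);
      return n_recv_from;
    }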