From: Timo Heister
Date: Fri, 1 Nov 2019 18:53:11 +0000 (-0400)
Subject: use MPI::CollectiveMutex in several places
X-Git-Tag: v9.2.0-rc1~911^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f2e40116dbd08fd434873c584a453742898850fd;p=dealii.git

use MPI::CollectiveMutex in several places
---

diff --git a/include/deal.II/base/mpi_compute_index_owner_internal.h b/include/deal.II/base/mpi_compute_index_owner_internal.h
index bf246890a3..b2e63c0a34 100644
--- a/include/deal.II/base/mpi_compute_index_owner_internal.h
+++ b/include/deal.II/base/mpi_compute_index_owner_internal.h
@@ -119,6 +119,10 @@ namespace Utilities
           this->partition(owned_indices, comm);
 
 #ifdef DEAL_II_WITH_MPI
+
+          static CollectiveMutex      mutex;
+          CollectiveMutex::ScopedLock lock(mutex, comm);
+
           unsigned int my_rank = this_mpi_process(comm);
 
           types::global_dof_index dic_local_received = 0;
@@ -630,6 +634,9 @@ namespace Utilities
 
 #ifdef DEAL_II_WITH_MPI
 
+        static CollectiveMutex      mutex;
+        CollectiveMutex::ScopedLock lock(mutex, comm);
+
         // reserve enough slots for the requests ahead; depending on
         // whether the owning rank is one of the requesters or not, we
         // might have one less requests to execute, so fill the requests
@@ -768,11 +775,6 @@ namespace Utilities
           }
 #  endif
 
-        // This barrier is important to make sure that two successive calls
-        // to this functions do not overlap and we confuse messages. See the
-        // discussion in https://github.com/dealii/dealii/issues/8929
-        MPI_Barrier(comm);
-
 #endif // DEAL_II_WITH_MPI
 
         return requested_indices;
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index 9520a9e709..c5c84b3310 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -237,6 +237,10 @@ namespace Utilities
         }
 
 #  if DEAL_II_MPI_VERSION_GTE(2, 2)
+
+      static CollectiveMutex      mutex;
+      CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
+
       // Calculate the number of messages to send to each process
       std::vector<unsigned int> dest_vector(n_procs);
       for (const auto &el : destinations)
@@ -947,6 +951,9 @@ namespace Utilities
     void
     ConsensusAlgorithm_NBX<T1, T2>::run()
     {
+      static CollectiveMutex      mutex;
+      CollectiveMutex::ScopedLock lock(mutex, this->comm);
+
       // 1) send requests and start receiving the answers
       start_communication();
 
@@ -1273,6 +1280,9 @@ namespace Utilities
     void
     ConsensusAlgorithm_PEX<T1, T2>::run()
     {
+      static CollectiveMutex      mutex;
+      CollectiveMutex::ScopedLock lock(mutex, this->comm);
+
       // 1) send requests and start receiving the answers
       //    especially determine how many requests are expected
       const unsigned int n_requests = start_communication();
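
The hunks above all introduce the same RAII pattern: a function-local static CollectiveMutex that guards one communication phase, held for the lifetime of a CollectiveMutex::ScopedLock so that two successive calls on the same communicator cannot interleave their point-to-point messages (the role previously played by the explicit MPI_Barrier removed above; see https://github.com/dealii/dealii/issues/8929). A minimal usage sketch of that pattern follows, assuming a deal.II build with MPI; the function exchange_once() and its body are hypothetical, only the mutex/lock lines mirror the patch:

    #include <deal.II/base/mpi.h>

    using namespace dealii;

    // Hypothetical function performing one round of point-to-point
    // communication on 'comm'. Only the two mutex/lock lines mirror the
    // patch; everything else is illustrative.
    void
    exchange_once(const MPI_Comm &comm)
    {
      // One mutex shared by all calls to this function:
      static Utilities::MPI::CollectiveMutex mutex;

      // Acquired here and released again when 'lock' goes out of scope,
      // so a second call cannot start communicating on 'comm' before the
      // first call has finished:
      Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);

      // ... MPI_Isend / MPI_Irecv / MPI_Waitall as in the patched functions;
      // the trailing MPI_Barrier(comm) is no longer needed.
    }

Since locking and unlocking are collective over the communicator, every rank has to enter and leave exchange_once() in the same order, just as with the barrier the patch removes.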