this->partition(owned_indices, comm);
#ifdef DEAL_II_WITH_MPI
+
+ static CollectiveMutex mutex;
+ CollectiveMutex::ScopedLock lock(mutex, comm);
+
unsigned int my_rank = this_mpi_process(comm);
types::global_dof_index dic_local_received = 0;
#ifdef DEAL_II_WITH_MPI
+ static CollectiveMutex mutex;
+ CollectiveMutex::ScopedLock lock(mutex, comm);
+
   // reserve enough slots for the requests ahead; depending on
   // whether the owning rank is one of the requesters or not, we
   // might have one fewer request to execute, so fill the requests
}
# endif
- // This barrier is important to make sure that two successive calls
- // to this functions do not overlap and we confuse messages. See the
- // discussion in https://github.com/dealii/dealii/issues/8929
- MPI_Barrier(comm);
-
#endif // DEAL_II_WITH_MPI
return requested_indices;
}
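
For context, the barrier deleted above guarded against two successive invocations matching each other's wildcard receives. The following is a minimal sketch of the hazard and of the replacement pattern, using a hypothetical exchange_with_neighbor() routine (not part of deal.II) together with the CollectiveMutex API that appears verbatim in the hunks:

#include <deal.II/base/mpi.h>
#include <mpi.h>

using dealii::Utilities::MPI::CollectiveMutex;

// Hypothetical stand-in for the patched functions: each rank sends one
// integer to its right neighbor and receives one from anywhere. Because the
// receive uses MPI_ANY_SOURCE, a second, overlapping invocation on another
// rank could have its message stolen by a still-running first invocation;
// the static mutex serializes the two calls across the communicator.
int
exchange_with_neighbor(const int value, const MPI_Comm comm)
{
  // One mutex per call site, as in the hunks above; the lock is acquired
  // collectively on `comm` and released when `lock` goes out of scope.
  static CollectiveMutex      mutex;
  CollectiveMutex::ScopedLock lock(mutex, comm);

  int rank = 0, size = 1;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  MPI_Request request;
  MPI_Isend(&value, 1, MPI_INT, (rank + 1) % size, /*tag=*/47, comm, &request);

  int received = 0;
  MPI_Recv(&received, 1, MPI_INT, MPI_ANY_SOURCE, 47, comm, MPI_STATUS_IGNORE);
  MPI_Wait(&request, MPI_STATUS_IGNORE);
  return received;
}
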
# if DEAL_II_MPI_VERSION_GTE(2, 2)
+
+ static CollectiveMutex mutex;
+ CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
+
// Calculate the number of messages to send to each process
std::vector<unsigned int> dest_vector(n_procs);
for (const auto &el : destinations)
void
ConsensusAlgorithm_NBX<T1, T2>::run()
{
+ static CollectiveMutex mutex;
+ CollectiveMutex::ScopedLock lock(mutex, this->comm);
+
// 1) send requests and start receiving the answers
start_communication();
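
The ScopedLock form also buys exception safety: its destructor releases the mutex on every rank even if the guarded communication throws. A hand-rolled equivalent, assuming CollectiveMutex exposes lock()/unlock() members taking the communicator, would be:

void
guarded_communication(const MPI_Comm comm)
{
  static dealii::Utilities::MPI::CollectiveMutex mutex;

  mutex.lock(comm);
  try
    {
      // ... the wildcard-receive communication of run() would go here ...
      mutex.unlock(comm);
    }
  catch (...)
    {
      // without this unlock, a throwing rank would leave the mutex held
      // and deadlock the next acquisition
      mutex.unlock(comm);
      throw;
    }
}
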
void
ConsensusAlgorithm_PEX<T1, T2>::run()
{
+ static CollectiveMutex mutex;
+ CollectiveMutex::ScopedLock lock(mutex, this->comm);
+
// 1) send requests and start receiving the answers
// especially determine how many requests are expected
const unsigned int n_requests = start_communication();
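
Taken together, these locks make back-to-back exchanges on the same communicator safe without any user-side barrier. Assuming `consensus` is an already constructed ConsensusAlgorithm_NBX<T1, T2> (or _PEX) object:

consensus.run();
consensus.run(); // safe: the collective lock inside run() keeps the second
                 // call's messages away from ranks still busy with the first
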