--- /dev/null
+New: The Utilities::MPI::CollectiveMutex helper class allows
+protection of critical sections in code using MPI.
+<br>
+(Timo Heister, 2019/11/04)
// some constructs with MPI data
// types. Therefore, create some dummies
using MPI_Comm = int;
+using MPI_Request = int;
using MPI_Datatype = int;
using MPI_Op = int;
# ifndef MPI_COMM_WORLD
# ifndef MPI_COMM_SELF
# define MPI_COMM_SELF 0
# endif
+# ifndef MPI_REQUEST_NULL
+# define MPI_REQUEST_NULL 0
+# endif
# ifndef MPI_MIN
# define MPI_MIN 0
# endif
MPI_Comm comm;
};
+ /**
+ * This class represents a mutex to guard a critical section for a set of
+ * processors in a parallel computation using MPI.
+ *
+ * The lock() command waits until all MPI ranks in the communicator have
+ * released a previous lock using unlock().
+ *
+ * A typical usage involves guarding a critical section using a lock guard:
+ * @code
+ * {
+ * static CollectiveMutex mutex;
+ * CollectiveMutex::ScopedLock lock(mutex, comm);
+ * // [ critical code to be guarded]
+ * }
+ * @endcode
+ *
+ * Here, the critical code will finish on all processors before the mutex
+ * can be acquired again (for example by a second execution of the block
+ * above). The critical code block typically involves MPI communication that
+ * would yield incorrect results without the lock. For example, if the code
+ * contains nonblocking receives with MPI_ANY_SOURCE, packets can be
+ * confused between iterations.
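+ *
+ * As an illustration of that failure mode, here is a sketch of such an
+ * exchange (assuming that `mutex`, `comm`, `tag`, `my_rank`, `n_ranks`,
+ * `value`, and `status` are declared elsewhere):
+ * @code
+ * CollectiveMutex::ScopedLock lock(mutex, comm);
+ * if (my_rank == 0)
+ *   for (unsigned int n = 1; n < n_ranks; ++n)
+ *     // Without the lock, this receive could match a message still in
+ *     // flight from a previous execution of this block:
+ *     MPI_Recv(&value, 1, MPI_UNSIGNED, MPI_ANY_SOURCE, tag, comm, &status);
+ * else
+ *   MPI_Send(&value, 1, MPI_UNSIGNED, 0, tag, comm);
+ * @endcode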
+ *
+ * Note that the mutex needs to be the same instance for all calls that
+ * guard the same critical region. While not required, this can be achieved
+ * by making the instance static (as in the first code example above). The
+ * variable can also be a global variable, or a member variable of the object
+ * to which the executing function belongs (see the sketch below).
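+ *
+ * A sketch of the member-variable variant (the class name `MyAlgorithm`
+ * and its function `communicate()` are purely illustrative):
+ * @code
+ * class MyAlgorithm
+ * {
+ * public:
+ *   void communicate(const MPI_Comm &comm)
+ *   {
+ *     // Every call to communicate() uses the same member mutex, so the
+ *     // critical section is guarded across repeated invocations:
+ *     CollectiveMutex::ScopedLock lock(mutex, comm);
+ *     // [ critical code to be guarded ]
+ *   }
+ *
+ * private:
+ *   CollectiveMutex mutex;
+ * };
+ * @endcode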
+ */
+ class CollectiveMutex
+ {
+ public:
+ /**
+ * This helper class provides a scoped lock for the CollectiveMutex.
+ *
+ * See the class documentation of CollectiveMutex for details.
+ */
+ class ScopedLock
+ {
+ public:
+ /**
+ * Constructor. Blocks until it can acquire the lock.
+ */
+ explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
+ : mutex(mutex)
+ , comm(comm)
+ {
+ mutex.lock(comm);
+ }
+
+ /**
+ * Destructor. Releases the lock.
+ */
+ ~ScopedLock()
+ {
+ mutex.unlock(comm);
+ }
+
+ private:
+ /**
+ * A reference to the mutex.
+ */
+ CollectiveMutex &mutex;
+ /**
+ * The communicator.
+ */
+ const MPI_Comm &comm;
+ };
+
+ /**
+ * Constructor of this class.
+ */
+ explicit CollectiveMutex();
+
+ /**
+ * Destroy the mutex. Assumes the lock is not currently held.
+ */
+ ~CollectiveMutex();
+
+ /**
+ * Acquire the mutex and, if necessary, wait until we can do so.
+ *
+ * This is a collective call that needs to be executed by all processors
+ * in the communicator.
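+ *
+ * For illustration, a minimal sketch of manual locking without the
+ * ScopedLock helper (assuming `comm` is a communicator on which all
+ * participating ranks execute this code):
+ * @code
+ * static CollectiveMutex mutex;
+ * mutex.lock(comm);
+ * // [ critical code to be guarded ]
+ * mutex.unlock(comm);
+ * @endcode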
+ */
+ void
+ lock(MPI_Comm comm);
+
+ /**
+ * Release the lock.
+ *
+ * This is a collective call that needs to be executed by all processors
+ * in the communicator.
+ */
+ void
+ unlock(MPI_Comm comm);
+
+ private:
+ /**
+ * Keep track of whether we currently hold this lock.
+ */
+ bool locked;
+
+ /**
+ * The request to keep track of the non-blocking barrier.
+ */
+ MPI_Request request;
+ };
+
+
+
/**
* If @p comm is an intracommunicator, this function returns a new
* communicator @p newcomm with communication group defined by the
* MPI process.
*/
~MPI_InitFinalize();
+
+ /**
+ * Register a reference to an MPI_Request
+ * on which we need to call `MPI_Wait` before calling `MPI_Finalize`.
+ *
+ * The object @p request needs to exist when MPI_Finalize is called, which means the
+ * request is typically statically allocated. Otherwise, you need to call
+ * unregister_request() before the request goes out of scope. Note that it
+ * is acceptable for a request to be already waited on (and consequently
+ * reset to MPI_REQUEST_NULL).
+ *
+ * It is acceptable to call this function more than once with the same
+ * instance (as is done in the example below).
+ *
+ * Typically, this function is used by CollectiveMutex rather than called
+ * directly, but it can also be used directly like this:
+ * @code
+ * void my_fancy_communication()
+ * {
+ * static MPI_Request request = MPI_REQUEST_NULL;
+ * MPI_InitFinalize::register_request(request);
+ * MPI_Wait(&request, MPI_STATUS_IGNORE);
+ * // [some algorithm that is not safe to be executed twice in a row.]
+ * MPI_Ibarrier(comm, &request);
+ * }
+ * @endcode
+ */
+ static void
+ register_request(MPI_Request &request);
+
+ /**
+ * Unregister a request previously added using register_request().
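+ *
+ * For illustration, a sketch of a request that is not statically allocated
+ * and therefore has to be unregistered before it goes out of scope (the
+ * function name `short_lived_communication()` is hypothetical):
+ * @code
+ * void short_lived_communication(MPI_Comm comm)
+ * {
+ *   MPI_Request request = MPI_REQUEST_NULL;
+ *   MPI_InitFinalize::register_request(request);
+ *
+ *   // [nonblocking communication that uses `request`]
+ *
+ *   MPI_Wait(&request, MPI_STATUS_IGNORE);
+ *   MPI_InitFinalize::unregister_request(request);
+ * }
+ * @endcode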
+ */
+ static void
+ unregister_request(MPI_Request &request);
+
+ private:
+ /**
+ * The set of requests that need to be waited on (via MPI_Wait) before
+ * calling MPI_Finalize.
+ */
+ static std::set<MPI_Request *> requests;
};
/**
}
+
+ void
+ MPI_InitFinalize::register_request(MPI_Request &request)
+ {
+ // Insert the address of the request; std::set ignores duplicates, so
+ // registering the same request more than once is harmless:
+ requests.insert(&request);
+ }
+
+
+
+ void
+ MPI_InitFinalize::unregister_request(MPI_Request &request)
+ {
+ Assert(
+ requests.find(&request) != requests.end(),
+ ExcMessage(
+ "You tried to call unregister_request() with an invalid request."));
+
+ requests.erase(&request);
+ }
+
+
+
+ std::set<MPI_Request *> MPI_InitFinalize::requests;
+
+
+
MPI_InitFinalize::~MPI_InitFinalize()
{
// make memory pool release all PETSc/Trilinos/MPI-based vectors that
// would run after MPI_Finalize is called, leading to errors
#ifdef DEAL_II_WITH_MPI
- // Start with the deal.II MPI vectors (need to do this before finalizing
- // PETSc because it finalizes MPI). Delete vectors from the pools:
+ // Before exiting, wait for nonblocking communication to complete:
+ for (auto request : requests)
+ {
+ const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+
+ // Start with deal.II MPI vectors and delete vectors from the pools:
GrowingVectorMemory<
LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
std::pair<types::global_dof_index, types::global_dof_index>,
unsigned int>;
+
+
+ CollectiveMutex::CollectiveMutex()
+ : locked(false)
+ , request(MPI_REQUEST_NULL)
+ {
+ Utilities::MPI::MPI_InitFinalize::register_request(request);
+ }
+
+
+
+ CollectiveMutex::~CollectiveMutex()
+ {
+ Assert(
+ !locked,
+ ExcMessage(
+ "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
+
+ Utilities::MPI::MPI_InitFinalize::unregister_request(request);
+ }
+
+
+
+ void
+ CollectiveMutex::lock(MPI_Comm comm)
+ {
+ (void)comm;
+
+ Assert(
+ !locked,
+ ExcMessage(
+ "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
+
+#ifdef DEAL_II_WITH_MPI
+
+ // TODO: For now, we implement this mutex with a blocking barrier in
+ // lock() and unlock(). It remains to be tested whether we can move to a
+ // nonblocking barrier (see the disabled code below).
+
+ const int ierr = MPI_Barrier(comm);
+ AssertThrowMPI(ierr);
+
+# if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
+ // wait for non-blocking barrier to finish. This is a noop the
+ // first time we lock().
+ const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+# else
+ // nothing to do as blocking barrier already completed
+# endif
+#endif
+
+ locked = true;
+ }
+
+
+
+ void
+ CollectiveMutex::unlock(MPI_Comm comm)
+ {
+ (void)comm;
+
+ Assert(
+ locked,
+ ExcMessage(
+ "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
+
+#ifdef DEAL_II_WITH_MPI
+
+ // TODO: For now, we implement this mutex with a blocking barrier in
+ // lock() and unlock(). It remains to be tested whether we can move to a
+ // nonblocking barrier (see the disabled code below):
+
+# if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
+ const int ierr = MPI_Ibarrier(comm, &request);
+ AssertThrowMPI(ierr);
+# else
+ const int ierr = MPI_Barrier(comm);
+ AssertThrowMPI(ierr);
+# endif
+#endif
+
+ locked = false;
+ }
+
#include "mpi.inst"
} // end of namespace MPI
} // end of namespace Utilities
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check MPI_InitFinalize::register_request
+
+#include <deal.II/base/mpi.h>
+
+#include "../tests.h"
+
+void
+unnecessary(MPI_Comm comm)
+{
+ // We register this request, but actually wait on it at the end of this
+ // function, which means that by the time MPI_Finalize runs the request is
+ // guaranteed to be MPI_REQUEST_NULL already. Make sure this works:
+ static MPI_Request request = MPI_REQUEST_NULL;
+ Utilities::MPI::MPI_InitFinalize::register_request(request);
+
+ int ierr = MPI_Ibarrier(comm, &request);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+}
+
+
+void
+test(MPI_Comm comm)
+{
+ // Here we execute an Ibarrier at the end and a wait at the
+ // beginning. Without the barrier, multiple calls to this function fail as
+ // messages are mixed up. With it, the algorithm works, but failing to
+ // register the request would leave a pending request at MPI_Finalize,
+ // which is an error as well.
+
+ static MPI_Request request = MPI_REQUEST_NULL;
+ Utilities::MPI::MPI_InitFinalize::register_request(request);
+ int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ int tag = 12345;
+ const auto my_rank = Utilities::MPI::this_mpi_process(comm);
+ const auto n_ranks = Utilities::MPI::n_mpi_processes(comm);
+
+ if (my_rank == 0)
+ {
+ std::set<int> received_from;
+ MPI_Status status;
+
+ for (unsigned int n = 1; n < n_ranks; ++n)
+ {
+ unsigned int value;
+ MPI_Recv(&value, 1, MPI_UNSIGNED, MPI_ANY_SOURCE, tag, comm, &status);
+
+ AssertThrow(received_from.count(status.MPI_SOURCE) == 0,
+ ExcMessage("oh no!"));
+ received_from.insert(status.MPI_SOURCE);
+ }
+ }
+ else
+ {
+ unsigned int value = 123;
+ int dest = 0;
+ MPI_Send(&value, 1, MPI_UNSIGNED, dest, tag, comm);
+ }
+
+ ierr = MPI_Ibarrier(comm, &request);
+ AssertThrowMPI(ierr);
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ mpi_initlog();
+
+ unnecessary(MPI_COMM_WORLD);
+ unnecessary(MPI_COMM_WORLD);
+
+ test(MPI_COMM_WORLD);
+ test(MPI_COMM_WORLD);
+ test(MPI_COMM_WORLD);
+}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check MPI::CollectiveMutex
+
+#include <deal.II/base/mpi.h>
+
+#include "../tests.h"
+
+
+void
+unguarded(MPI_Comm comm)
+{
+ int tag = 12345;
+ const auto my_rank = Utilities::MPI::this_mpi_process(comm);
+ const auto n_ranks = Utilities::MPI::n_mpi_processes(comm);
+
+ if (my_rank == 0)
+ {
+ std::set<int> received_from;
+ MPI_Status status;
+
+ for (unsigned int n = 1; n < n_ranks; ++n)
+ {
+ unsigned int value;
+ MPI_Recv(&value, 1, MPI_UNSIGNED, MPI_ANY_SOURCE, tag, comm, &status);
+
+ AssertThrow(received_from.count(status.MPI_SOURCE) == 0,
+ ExcMessage("oh no!"));
+ received_from.insert(status.MPI_SOURCE);
+ }
+ }
+ else
+ {
+ unsigned int value = 123;
+ int dest = 0;
+ MPI_Send(&value, 1, MPI_UNSIGNED, dest, tag, comm);
+ }
+}
+
+
+
+void
+test(MPI_Comm comm)
+{
+ // check that we can use a static mutex:
+ static Utilities::MPI::CollectiveMutex mutex;
+ Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
+ unguarded(comm);
+}
+
+
+
+void
+test2(MPI_Comm comm)
+{
+ // Check that we can use a mutex that is not static:
+ Utilities::MPI::CollectiveMutex mutex;
+ mutex.lock(comm);
+ MPI_Barrier(comm);
+ mutex.unlock(comm);
+ mutex.lock(comm);
+ mutex.unlock(comm);
+}
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ mpi_initlog();
+
+ test(MPI_COMM_WORLD);
+ test(MPI_COMM_WORLD);
+ test(MPI_COMM_WORLD);
+
+ test2(MPI_COMM_WORLD);
+}