https://gitweb.dealii.org/ - dealii.git/commitdiff
Introduce ConsensusAlgorithms namespace 9623/head
author    Peter Munch <peterrmuench@gmail.com>
          Thu, 5 Mar 2020 09:56:18 +0000 (10:56 +0100)
committer Peter Munch <peterrmuench@gmail.com>
          Sat, 7 Mar 2020 07:24:41 +0000 (08:24 +0100)
14 files changed:
include/deal.II/base/mpi_compute_index_owner_internal.h
include/deal.II/base/mpi_consensus_algorithm.h [deleted file]
include/deal.II/base/mpi_consensus_algorithm.templates.h [deleted file]
include/deal.II/base/mpi_consensus_algorithms.h [new file with mode: 0644]
include/deal.II/base/mpi_consensus_algorithms.templates.h [new file with mode: 0644]
include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
include/deal.II/base/mpi_tags.h
source/base/CMakeLists.txt
source/base/mpi.cc
source/base/mpi_consensus_algorithm.cc [deleted file]
source/base/mpi_consensus_algorithms.cc [new file with mode: 0644]
source/base/partitioner.cc
tests/base/consensus_algorithm_01.cc
tests/mpi/compute_index_owner_01.cc
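
In short, this commit replaces the flat ConsensusAlgorithm* class names by a dedicated ConsensusAlgorithms namespace and renames the headers accordingly. The renaming, as far as it can be read off the diff below (illustrative, not necessarily exhaustive):

  Utilities::MPI::ConsensusAlgorithmProcess<T1, T2>  -> Utilities::MPI::ConsensusAlgorithms::Process<T1, T2>
  Utilities::MPI::ConsensusAlgorithm<T1, T2>         -> Utilities::MPI::ConsensusAlgorithms::Interface<T1, T2>
  Utilities::MPI::ConsensusAlgorithm_NBX<T1, T2>     -> Utilities::MPI::ConsensusAlgorithms::NBX<T1, T2>
  Utilities::MPI::ConsensusAlgorithmSelector<T1, T2> -> Utilities::MPI::ConsensusAlgorithms::Selector<T1, T2>
  #include <deal.II/base/mpi_consensus_algorithm.h>  -> #include <deal.II/base/mpi_consensus_algorithms.h>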

diff --git a/include/deal.II/base/mpi_compute_index_owner_internal.h b/include/deal.II/base/mpi_compute_index_owner_internal.h
index d853cd8739c4524ef809c67f81c53b0c08e94c17..7af1484e487abc6032ef7d0383c6923ea1af3c0a 100644 (file)
@@ -19,7 +19,7 @@
 #include <deal.II/base/config.h>
 
 #include <deal.II/base/mpi.h>
-#include <deal.II/base/mpi_consensus_algorithm.h>
+#include <deal.II/base/mpi_consensus_algorithms.h>
 
 DEAL_II_NAMESPACE_OPEN
 
@@ -36,14 +36,14 @@ namespace Utilities
       namespace ComputeIndexOwner
       {
         /**
-         * Specialization of ConsensusAlgorithmProcess for setting up the
+         * Specialization of ConsensusAlgorithms::Process for setting up the
          * Dictionary even if there are ranges in the IndexSet space not owned
          * by any processes.
          *
          * @note Only for internal usage.
          */
         class DictionaryPayLoad
-          : public ConsensusAlgorithmProcess<
+          : public ConsensusAlgorithms::Process<
               std::pair<types::global_dof_index, types::global_dof_index>,
               unsigned int>
         {
@@ -68,7 +68,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::compute_targets().
+           * Utilities::MPI::ConsensusAlgorithms::Process::compute_targets().
            */
           virtual std::vector<unsigned int>
           compute_targets() override
@@ -82,7 +82,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::create_request().
+           * Utilities::MPI::ConsensusAlgorithms::Process::create_request().
            */
           virtual void
           create_request(const unsigned int other_rank,
@@ -95,7 +95,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::answer_request().
+           * Utilities::MPI::ConsensusAlgorithms::Process::answer_request().
            */
           virtual void
           answer_request(
@@ -397,17 +397,17 @@ namespace Utilities
               }
             else
               {
-                // with gap: use ConsensusAlgorithm to determine when all
+                // with gap: use ConsensusAlgorithm to determine when all
                 // dictionaries have been set up.
 
-                // 3/4) use ConsensusAlgorithm to send messages with local dofs
-                // to the right dict process
+                // 3/4) use a ConsensusAlgorithm to send messages with local
+                // dofs to the right dict process
                 DictionaryPayLoad temp(buffers,
                                        actually_owning_ranks,
                                        local_range,
                                        actually_owning_rank_list);
 
-                ConsensusAlgorithmSelector<
+                ConsensusAlgorithms::Selector<
                   std::pair<types::global_dof_index, types::global_dof_index>,
                   unsigned int>
                   consensus_algo(temp, comm);
@@ -530,13 +530,13 @@ namespace Utilities
 
 
         /**
-         * Specialization of ConsensusAlgorithmProcess for the context of
+         * Specialization of ConsensusAlgorithms::Process for the context of
          * Utilities::MPI::compute_index_owner() and
          * Utilities::MPI::Partitioner::set_ghost_indices() with additional
          * payload.
          */
-        class ConsensusAlgorithmPayload
-          : public ConsensusAlgorithmProcess<
+        class ConsensusAlgorithmsPayload
+          : public ConsensusAlgorithms::Process<
               std::pair<types::global_dof_index, types::global_dof_index>,
               unsigned int>
         {
@@ -544,11 +544,11 @@ namespace Utilities
           /**
            * Constructor.
            */
-          ConsensusAlgorithmPayload(const IndexSet &owned_indices,
-                                    const IndexSet &indices_to_look_up,
-                                    const MPI_Comm &comm,
-                                    std::vector<unsigned int> &owning_ranks,
-                                    const bool track_index_requests = false)
+          ConsensusAlgorithmsPayload(const IndexSet &owned_indices,
+                                     const IndexSet &indices_to_look_up,
+                                     const MPI_Comm &comm,
+                                     std::vector<unsigned int> &owning_ranks,
+                                     const bool track_index_requests = false)
             : owned_indices(owned_indices)
             , indices_to_look_up(indices_to_look_up)
             , comm(comm)
@@ -636,7 +636,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::answer_request(),
+           * Utilities::MPI::ConsensusAlgorithms::Process::answer_request(),
            * adding the owner of a particular index in request_buffer (and
            * keeping track of who requested a particular index in case that
            * information is also desired).
@@ -663,7 +663,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::compute_targets().
+           * Utilities::MPI::ConsensusAlgorithms::Process::compute_targets().
            */
           virtual std::vector<unsigned int>
           compute_targets() override
@@ -721,7 +721,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::create_request().
+           * Utilities::MPI::ConsensusAlgorithms::Process::create_request().
            */
           virtual void
           create_request(const unsigned int other_rank,
@@ -744,7 +744,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::prepare_buffer_for_answer().
+           * Utilities::MPI::ConsensusAlgorithms::Process::prepare_buffer_for_answer().
            */
           virtual void
           prepare_buffer_for_answer(
@@ -756,7 +756,7 @@ namespace Utilities
 
           /**
            * Implementation of
-           * Utilities::MPI::ConsensusAlgorithmProcess::read_answer().
+           * Utilities::MPI::ConsensusAlgorithms::Process::read_answer().
            */
           virtual void
           read_answer(const unsigned int               other_rank,
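
For illustration (not part of the diff): a minimal sketch of how a user-defined payload class plugs into the renamed interface, mirroring the DictionaryPayLoad/Selector usage in the hunk above. The names MyPayload and run_example are made up; only the header path and the ConsensusAlgorithms classes are taken from the patch.

#include <deal.II/base/mpi_consensus_algorithms.h>

#include <utility>
#include <vector>

using dealii::types::global_dof_index;

class MyPayload
  : public dealii::Utilities::MPI::ConsensusAlgorithms::
      Process<std::pair<global_dof_index, global_dof_index>, unsigned int>
{
public:
  // The only method that has to be implemented: the ranks this process
  // wants to send a request to.
  virtual std::vector<unsigned int>
  compute_targets() override
  {
    return {}; // e.g., the ranks owning the index ranges this process needs
  }

  // create_request()/answer_request()/read_answer() keep their (empty)
  // default implementations here, i.e., no payload is attached.
};

void
run_example(const MPI_Comm &comm)
{
  MyPayload payload;

  // As in the hunk above: the Selector delegates to an NBX or PEX
  // implementation depending on the number of MPI processes.
  dealii::Utilities::MPI::ConsensusAlgorithms::
    Selector<std::pair<global_dof_index, global_dof_index>, unsigned int>
      consensus_algorithm(payload, comm);
  consensus_algorithm.run();
}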
diff --git a/include/deal.II/base/mpi_consensus_algorithm.h b/include/deal.II/base/mpi_consensus_algorithm.h
deleted file mode 100644 (file)
index bfd7b3a..0000000
+++ /dev/null
@@ -1,657 +0,0 @@
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2020 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-#ifndef dealii_mpi_consensus_algorithm_h
-#define dealii_mpi_consensus_algorithm_h
-
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/mpi.h>
-#include <deal.II/base/mpi.templates.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-namespace Utilities
-{
-  namespace MPI
-  {
-    /**
-     * An interface to be able to use the ConsensusAlgorithm classes. The main
-     * functionality of the implementations is to return a list of process ranks
-     * this process wants data from and to deal with the optional payload of the
-     * messages sent/received by the ConsensusAlgorithm classes.
-     *
-     * There are two kinds of messages:
-     * - send/request message: A message consisting of a data request
-     *   which should be answered by another process. This message is
-     *   considered as a request message by the receiving rank.
-     * - recv message: The answer to a send/request message.
-     *
-     * @tparam T1 the type of the elements of the vector to sent
-     * @tparam T2 the type of the elements of the vector to received
-     *
-     * @note Since the payloads of the messages are optional, users have
-     *       to deal with buffers themselves. The ConsensusAlgorithm classes 1)
-     *       deliver only references to empty vectors (of size 0)
-     *       the data to be sent can be inserted to or read from, and
-     *       2) communicate these vectors blindly.
-     *
-     * @author Peter Munch, 2019
-     */
-    template <typename T1, typename T2>
-    class ConsensusAlgorithmProcess
-    {
-    public:
-      /**
-       * Destructor.
-       */
-      virtual ~ConsensusAlgorithmProcess() = default;
-
-      /**
-       * @return A vector of ranks this process wants to send a request to.
-       *
-       * @note This is the only method which has to be implemented since the
-       *       payloads of the messages are optional.
-       */
-      virtual std::vector<unsigned int>
-      compute_targets() = 0;
-
-      /**
-       * Add to the request to the process with the specified rank a payload.
-       *
-       * @param[in]  other_rank rank of the process
-       * @param[out] send_buffer data to be sent part of the request (optional)
-       *
-       * @note The buffer is empty. Before using it, you have to set its size.
-       */
-      virtual void
-      create_request(const unsigned int other_rank,
-                     std::vector<T1> &  send_buffer);
-
-      /**
-       * Prepare the buffer where the payload of the answer of the request to
-       * the process with the specified rank is saved in. The most obvious task
-       * is to resize the buffer, since it is empty when the function is called.
-       *
-       * @param[in]  other_rank rank of the process
-       * @param[out] recv_buffer data to be sent part of the request (optional)
-       */
-      virtual void
-      prepare_buffer_for_answer(const unsigned int other_rank,
-                                std::vector<T2> &  recv_buffer);
-
-      /**
-       * Prepare the buffer where the payload of the answer of the request to
-       * the process with the specified rank is saved in.
-       *
-       * @param[in]  other_rank rank of the process
-       * @param[in]  buffer_recv received payload (optional)
-       * @param[out] request_buffer payload to be sent as part of the request
-       *             (optional)
-       *
-       * @note The request_buffer is empty. Before using it, you have to set
-       *       its size.
-       */
-      virtual void
-      answer_request(const unsigned int     other_rank,
-                     const std::vector<T1> &buffer_recv,
-                     std::vector<T2> &      request_buffer);
-
-      /**
-       * Process the payload of the answer of the request to the process with
-       * the specified rank.
-       *
-       * @param[in] other_rank rank of the process
-       * @param[in] recv_buffer data to be sent part of the request (optional)
-       */
-      virtual void
-      read_answer(const unsigned int     other_rank,
-                  const std::vector<T2> &recv_buffer);
-    };
-
-
-
-    /**
-     * A base class for algorithms that implement the task of coming up with
-     * communication patterns to retrieve data from other processes in a
-     * dynamic-sparse way. In computer science, this is often called a
-     * <a href="https://en.wikipedia.org/wiki/Consensus_algorithm">consensus
-     * problem</a>.
-     *
-     * Dynamic-sparse means in this context:
-     * - By the time this function is called, the other processes do
-     *   not know yet that they have to answer requests.
-     * - Each process only has to communicate with a small subset of
-     *   processes of the MPI communicator.
-     *
-     * Naturally, the user has to provide:
-     * - A communicator.
-     * - For each rank a list of ranks of processes this process should
-     *   communicate to.
-     * - Functionality to pack/unpack data to be sent/received.
-     *
-     * This base class only introduces a basic interface to achieve
-     * these goals, while derived classes implement different algorithms
-     * to actually compute such communication patterns.
-     * The last two features of the list above this paragraph are implemented
-     * in classes derived from ConsensusAlgorithmProcess.
-     *
-     * @tparam T1 The type of the elements of the vector to be sent.
-     * @tparam T2 The type of the elements of the vector to be received.
-     *
-     * @author Peter Munch, 2019
-     */
-    template <typename T1, typename T2>
-    class ConsensusAlgorithm
-    {
-    public:
-      ConsensusAlgorithm(ConsensusAlgorithmProcess<T1, T2> &process,
-                         const MPI_Comm &                   comm);
-
-      /**
-       * Destructor.
-       */
-      virtual ~ConsensusAlgorithm() = default;
-
-      /**
-       * Run consensus algorithm.
-       */
-      virtual void
-      run() = 0;
-
-    protected:
-      /**
-       * Reference to the process provided by the user.
-       */
-      ConsensusAlgorithmProcess<T1, T2> &process;
-
-      /**
-       * MPI communicator.
-       */
-      const MPI_Comm &comm;
-
-      /**
-       * Rank of this process.
-       */
-      const unsigned int my_rank;
-
-      /**
-       * Number of processes in the communicator.
-       */
-      const unsigned int n_procs;
-    };
-
-
-    /**
-     * This class implements a concrete algorithm for the ConsensusAlgorithm
-     * base class, using only point-to-point communications and a single
-     * IBarrier.
-     *
-     * @note This class closely follows the paper Hoefler, Siebert, Lumsdaine
-     *       "Scalable Communication Protocols for Dynamic Sparse Data
-     *       Exchange". Since the algorithm shown there is not considering
-     *       payloads, the algorithm has been modified here in such a way that
-     *       synchronous sends (Issend) have been replaced by equivalent
-     *       Isend/Irecv, where Irecv receives the answer to a request (with
-     *       payload).
-     *
-     * @tparam T1 The type of the elements of the vector to be sent.
-     * @tparam T2 The type of the elements of the vector to be received.
-     *
-     * @author Peter Munch, 2019
-     */
-    template <typename T1, typename T2>
-    class ConsensusAlgorithm_NBX : public ConsensusAlgorithm<T1, T2>
-    {
-    public:
-      /**
-       * Constructor.
-       *
-       * @param process Process to be run during consensus algorithm.
-       * @param comm MPI Communicator
-       */
-      ConsensusAlgorithm_NBX(ConsensusAlgorithmProcess<T1, T2> &process,
-                             const MPI_Comm &                   comm);
-
-      /**
-       * Destructor.
-       */
-      virtual ~ConsensusAlgorithm_NBX() = default;
-
-      /**
-       * @copydoc ConsensusAlgorithm::run()
-       */
-      virtual void
-      run() override;
-
-    private:
-#ifdef DEAL_II_WITH_MPI
-      /**
-       * List of processes this process wants to send requests to.
-       */
-      std::vector<unsigned int> targets;
-
-      /**
-       * Buffers for sending requests.
-       */
-      std::vector<std::vector<T1>> send_buffers;
-
-      /**
-       * Requests for sending requests.
-       */
-      std::vector<MPI_Request> send_requests;
-
-      /**
-       * Buffers for receiving answers to requests.
-       */
-      std::vector<std::vector<T2>> recv_buffers;
-
-
-      /**
-       * Requests for receiving answers to requests.
-       */
-      std::vector<MPI_Request> recv_requests;
-
-      /**
-       * Buffers for sending answers to requests.
-       */
-      std::vector<std::unique_ptr<std::vector<T2>>> request_buffers;
-
-      /**
-       * Requests for sending answers to requests.
-       */
-      std::vector<std::unique_ptr<MPI_Request>> request_requests;
-
-      // request for barrier
-      MPI_Request barrier_request;
-#endif
-
-#ifdef DEBUG
-      /**
-       * List of processes who have made a request to this process.
-       */
-      std::set<unsigned int> requesting_processes;
-#endif
-
-      /**
-       * Check if all request answers have been received by this rank.
-       */
-      bool
-      check_own_state();
-
-      /**
-       * Signal to all other ranks that this rank has received all request
-       * answers via entering IBarrier.
-       */
-      void
-      signal_finish();
-
-      /**
-       * Check if all ranks have received all their request answers, i.e.
-       * all ranks have reached the IBarrier.
-       */
-      bool
-      check_global_state();
-
-      /**
-       * A request message from another rank has been received: process the
-       * request and send an answer.
-       */
-      void
-      answer_requests();
-
-      /**
-       * Start to send all requests via ISend and post IRecvs for the incoming
-       * answer messages.
-       */
-      void
-      start_communication();
-
-      /**
-       * After all rank has received all answers, the MPI data structures can be
-       * freed and the received answers can be processed.
-       */
-      void
-      clean_up_and_end_communication();
-    };
-
-    /**
-     * This class implements a concrete algorithm for the ConsensusAlgorithm
-     * base class, using a two step approach. In
-     * the first step the source ranks are determined and in the second step
-     * a static sparse data exchange is performed.
-     *
-     * @note In contrast to ConsensusAlgorithm_NBX, this class splits the same
-     *       task into two distinct steps. In the first step, all processes are
-     *       identified who want to send a request to this process. In the
-     *       second step, the data is exchanged. However, since - in the second
-     *       step - now it is clear how many requests have to be answered, i.e.
-     *       when this process can stop waiting for requests, no IBarrier is
-     *       needed.
-     *
-     * @note The function
-     *       Utilities::MPI::compute_point_to_point_communication_pattern() is
-     *       used to determine the source processes, which implements a
-     *       PEX-algorithm from Hoefner et al., "Scalable Communication
-     *       Protocols for Dynamic Sparse Data Exchange".
-     *
-     * @tparam T1 The type of the elements of the vector to be sent.
-     * @tparam T2 The type of the elements of the vector to be received.
-     *
-     * @author Peter Munch, 2019
-     */
-    template <typename T1, typename T2>
-    class ConsensusAlgorithm_PEX : public ConsensusAlgorithm<T1, T2>
-    {
-    public:
-      /**
-       * Constructor.
-       *
-       * @param process Process to be run during consensus algorithm.
-       * @param comm MPI Communicator
-       */
-      ConsensusAlgorithm_PEX(ConsensusAlgorithmProcess<T1, T2> &process,
-                             const MPI_Comm &                   comm);
-
-      /**
-       * Destructor.
-       */
-      virtual ~ConsensusAlgorithm_PEX() = default;
-
-      /**
-       * @copydoc ConsensusAlgorithm::run()
-       */
-      virtual void
-      run() override;
-
-    private:
-#ifdef DEAL_II_WITH_MPI
-      /**
-       * List of ranks of processes this processes wants to send a request to.
-       */
-      std::vector<unsigned int> targets;
-
-      /**
-       * List of ranks of processes wanting to send a request to this process.
-       */
-      std::vector<unsigned int> sources;
-
-      // data structures to send and receive requests
-
-      /**
-       * Buffers for sending requests.
-       */
-      std::vector<std::vector<T1>> send_buffers;
-
-      /**
-       * Buffers for receiving answers to requests.
-       */
-      std::vector<std::vector<T2>> recv_buffers;
-
-      /**
-       * Requests for sending requests and receiving answers to requests.
-       */
-      std::vector<MPI_Request> send_and_recv_buffers;
-
-      /**
-       * Buffers for sending answers to requests.
-       */
-      std::vector<std::vector<T2>> requests_buffers;
-
-      /**
-       * Requests for sending answers to requests.
-       */
-      std::vector<MPI_Request> requests_answers;
-#endif
-
-      /**
-       * The ith request message from another rank has been received: process
-       * the request and send an answer.
-       */
-      void
-      answer_requests(int index);
-
-      /**
-       * Start to send all requests via ISend and post IRecvs for the incoming
-       * answer messages.
-       */
-      unsigned int
-      start_communication();
-
-      /**
-       * After all answers have been exchanged, the MPI data structures can be
-       * freed and the received answers can be processed.
-       */
-      void
-      clean_up_and_end_communication();
-    };
-
-    /**
-     * A class which delegates its task to other ConsensusAlgorithm
-     * implementations depending on the number of processes in the
-     * MPI communicator. For a small number of processes it uses
-     * ConsensusAlgorithm_PEX and for a large number of processes
-     * ConsensusAlgorithm_NBX. The threshold depends if the program is
-     * compiled in debug or release mode.
-     *
-     * @tparam T1 The type of the elements of the vector to be sent.
-     * @tparam T2 The type of the elements of the vector to be received.
-     *
-     * @author Peter Munch, 2019
-     */
-    template <typename T1, typename T2>
-    class ConsensusAlgorithmSelector : public ConsensusAlgorithm<T1, T2>
-    {
-    public:
-      /**
-       * Constructor.
-       *
-       * @param process Process to be run during consensus algorithm.
-       * @param comm MPI Communicator.
-       */
-      ConsensusAlgorithmSelector(ConsensusAlgorithmProcess<T1, T2> &process,
-                                 const MPI_Comm &                   comm);
-
-      /**
-       * Destructor.
-       */
-      virtual ~ConsensusAlgorithmSelector() = default;
-
-      /**
-       * @copydoc ConsensusAlgorithm::run()
-       *
-       * @note The function call is delegated to another ConsensusAlgorithm implementation.
-       */
-      virtual void
-      run() override;
-
-    private:
-      // Pointer to the actual ConsensusAlgorithm implementation.
-      std::shared_ptr<ConsensusAlgorithm<T1, T2>> consensus_algo;
-    };
-
-    /**
-     * This class implements Utilities::MPI::ConsensusAlgorithmProcess,
-     * using user-provided function wrappers.
-     * The advantage of this class is that users do not have to write their
-     * own implementation but can register lambda functions directly.
-     */
-    template <typename T1, typename T2>
-    class AnonymousConsensusAlgorithmProcess
-      : public ConsensusAlgorithmProcess<T1, T2>
-    {
-    public:
-      /**
-       * Register functions that should be called for implementing the interface
-       * of ConsensusAlgorithmProcess.
-       *
-       * @param function_compute_targets called during `compute_targets`.
-       * @param function_create_request called during `create_request`.
-       * @param function_answer_request called during `answer_request`.
-       * @param function_prepare_buffer_for_answer called during
-       *   `prepare_buffer_for_answer`.
-       * @param function_read_answer called during `read_answer`.
-       */
-      AnonymousConsensusAlgorithmProcess(
-        const std::function<std::vector<unsigned int>()>
-          &function_compute_targets,
-        const std::function<void(const unsigned int, std::vector<T1> &)>
-          &function_create_request =
-            [](const unsigned int, std::vector<T1> &) {},
-        const std::function<void(const unsigned int,
-                                 const std::vector<T1> &,
-                                 std::vector<T2> &)> &function_answer_request =
-          [](const unsigned int, const std::vector<T1> &, std::vector<T2> &) {},
-        const std::function<void(const unsigned int, std::vector<T2> &)>
-          &function_prepare_buffer_for_answer =
-            [](const unsigned int, std::vector<T2> &) {},
-        const std::function<void(const unsigned int, const std::vector<T2> &)>
-          &function_read_answer =
-            [](const unsigned int, const std::vector<T2> &) {});
-
-      /**
-       * @copydoc ConsensusAlgorithmProcess::compute_targets()
-       */
-      std::vector<unsigned int>
-      compute_targets() override;
-
-      /**
-       * @copydoc ConsensusAlgorithmProcess::create_request()
-       */
-      void
-      create_request(const unsigned int other_rank,
-                     std::vector<T1> &  send_buffer) override;
-
-      /**
-       * @copydoc ConsensusAlgorithmProcess::answer_request()
-       */
-      void
-      answer_request(const unsigned int     other_rank,
-                     const std::vector<T1> &buffer_recv,
-                     std::vector<T2> &      request_buffer) override;
-
-      /**
-       * @copydoc ConsensusAlgorithmProcess::prepare_buffer_for_answer()
-       */
-      void
-      prepare_buffer_for_answer(const unsigned int other_rank,
-                                std::vector<T2> &  recv_buffer) override;
-
-      /**
-       * @copydoc ConsensusAlgorithmProcess::read_answer()
-       */
-      void
-      read_answer(const unsigned int     other_rank,
-                  const std::vector<T2> &recv_buffer) override;
-
-    private:
-      const std::function<std::vector<unsigned int>()> function_compute_targets;
-      const std::function<void(const int, std::vector<T1> &)>
-        function_create_request;
-      const std::function<
-        void(const unsigned int, const std::vector<T1> &, std::vector<T2> &)>
-        function_answer_request;
-      const std::function<void(const int, std::vector<T2> &)>
-        function_prepare_buffer_for_answer;
-      const std::function<void(const int, const std::vector<T2> &)>
-        function_read_answer;
-    };
-
-
-
-    template <typename T1, typename T2>
-    AnonymousConsensusAlgorithmProcess<T1, T2>::
-      AnonymousConsensusAlgorithmProcess(
-        const std::function<std::vector<unsigned int>()>
-          &function_compute_targets,
-        const std::function<void(const unsigned int, std::vector<T1> &)>
-          &                                           function_create_request,
-        const std::function<void(const unsigned int,
-                                 const std::vector<T1> &,
-                                 std::vector<T2> &)> &function_answer_request,
-        const std::function<void(const unsigned int, std::vector<T2> &)>
-          &function_prepare_buffer_for_answer,
-        const std::function<void(const unsigned int, const std::vector<T2> &)>
-          &function_read_answer)
-      : function_compute_targets(function_compute_targets)
-      , function_create_request(function_create_request)
-      , function_answer_request(function_answer_request)
-      , function_prepare_buffer_for_answer(function_prepare_buffer_for_answer)
-      , function_read_answer(function_read_answer)
-    {}
-
-
-
-    template <typename T1, typename T2>
-    std::vector<unsigned int>
-    AnonymousConsensusAlgorithmProcess<T1, T2>::compute_targets()
-    {
-      return function_compute_targets();
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    AnonymousConsensusAlgorithmProcess<T1, T2>::create_request(
-      const unsigned int other_rank,
-      std::vector<T1> &  send_buffer)
-    {
-      function_create_request(other_rank, send_buffer);
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    AnonymousConsensusAlgorithmProcess<T1, T2>::answer_request(
-      const unsigned int     other_rank,
-      const std::vector<T1> &buffer_recv,
-      std::vector<T2> &      request_buffer)
-    {
-      function_answer_request(other_rank, buffer_recv, request_buffer);
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    AnonymousConsensusAlgorithmProcess<T1, T2>::prepare_buffer_for_answer(
-      const unsigned int other_rank,
-      std::vector<T2> &  recv_buffer)
-    {
-      function_prepare_buffer_for_answer(other_rank, recv_buffer);
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    AnonymousConsensusAlgorithmProcess<T1, T2>::read_answer(
-      const unsigned int     other_rank,
-      const std::vector<T2> &recv_buffer)
-    {
-      function_read_answer(other_rank, recv_buffer);
-    }
-
-
-  } // end of namespace MPI
-} // end of namespace Utilities
-
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
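
The AnonymousConsensusAlgorithmProcess documented in the header above takes its behavior as lambdas instead of requiring a derived class. A minimal sketch of its use under the old spelling that is removed here (the surrounding function, ranks, and buffers are made up for illustration; after this commit the same pattern presumably lives on as ConsensusAlgorithms::AnonymousProcess and ConsensusAlgorithms::Selector, but the new header is only partially shown below, so treat those spellings as an assumption):

#include <deal.II/base/mpi_consensus_algorithm.h>           // removed by this commit
#include <deal.II/base/mpi_consensus_algorithm.templates.h> // removed by this commit

#include <vector>

void
lambda_example(const MPI_Comm &   comm,
               const unsigned int my_rank,
               const unsigned int n_ranks)
{
  using namespace dealii::Utilities::MPI;

  // Every rank sends a request carrying its own rank to its right
  // neighbor and expects a single number back as the answer.
  AnonymousConsensusAlgorithmProcess<unsigned int, unsigned int> process(
    /*compute_targets:*/
    [&]() { return std::vector<unsigned int>{(my_rank + 1) % n_ranks}; },
    /*create_request:*/
    [&](const unsigned int, std::vector<unsigned int> &send_buffer) {
      send_buffer = {my_rank};
    },
    /*answer_request:*/
    [&](const unsigned int other_rank,
        const std::vector<unsigned int> &,
        std::vector<unsigned int> &answer) { answer = {other_rank}; },
    /*prepare_buffer_for_answer:*/
    [&](const unsigned int, std::vector<unsigned int> &recv_buffer) {
      recv_buffer.resize(1);
    },
    /*read_answer:*/
    [&](const unsigned int, const std::vector<unsigned int> &recv_buffer) {
      (void)recv_buffer; // use the received answer here
    });

  ConsensusAlgorithmSelector<unsigned int, unsigned int> selector(process,
                                                                  comm);
  selector.run();
}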
diff --git a/include/deal.II/base/mpi_consensus_algorithm.templates.h b/include/deal.II/base/mpi_consensus_algorithm.templates.h
deleted file mode 100644 (file)
index 3b39939..0000000
+++ /dev/null
@@ -1,597 +0,0 @@
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2011 - 2019 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-#ifndef dealii_mpi_consensus_algorithm_templates_h
-#define dealii_mpi_consensus_algorithm_templates_h
-
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/exceptions.h>
-#include <deal.II/base/mpi.h>
-#include <deal.II/base/mpi_consensus_algorithm.h>
-#include <deal.II/base/symmetric_tensor.h>
-#include <deal.II/base/tensor.h>
-
-#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/lapack_full_matrix.h>
-#include <deal.II/lac/sparse_matrix.h>
-#include <deal.II/lac/vector.h>
-
-#include <vector>
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace Utilities
-{
-  namespace MPI
-  {
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithmProcess<T1, T2>::answer_request(const unsigned int,
-                                                      const std::vector<T1> &,
-                                                      std::vector<T2> &)
-    {
-      // nothing to do
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithmProcess<T1, T2>::create_request(const unsigned int,
-                                                      std::vector<T1> &)
-    {
-      // nothing to do
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithmProcess<T1, T2>::prepare_buffer_for_answer(
-      const unsigned int,
-      std::vector<T2> &)
-    {
-      // nothing to do
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithmProcess<T1, T2>::read_answer(const unsigned int,
-                                                   const std::vector<T2> &)
-    {
-      // nothing to do
-    }
-
-
-
-    template <typename T1, typename T2>
-    ConsensusAlgorithm<T1, T2>::ConsensusAlgorithm(
-      ConsensusAlgorithmProcess<T1, T2> &process,
-      const MPI_Comm &                   comm)
-      : process(process)
-      , comm(comm)
-      , my_rank(this_mpi_process(comm))
-      , n_procs(n_mpi_processes(comm))
-    {}
-
-
-
-    template <typename T1, typename T2>
-    ConsensusAlgorithm_NBX<T1, T2>::ConsensusAlgorithm_NBX(
-      ConsensusAlgorithmProcess<T1, T2> &process,
-      const MPI_Comm &                   comm)
-      : ConsensusAlgorithm<T1, T2>(process, comm)
-    {}
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_NBX<T1, T2>::run()
-    {
-      static CollectiveMutex      mutex;
-      CollectiveMutex::ScopedLock lock(mutex, this->comm);
-
-      // 1) send requests and start receiving the answers
-      start_communication();
-
-      // 2) answer requests and check if all requests of this process have been
-      //    answered
-      while (!check_own_state())
-        answer_requests();
-
-      // 3) signal to all other processes that all requests of this process have
-      //    been answered
-      signal_finish();
-
-      // 4) nevertheless, this process has to keep on answering (potential)
-      //    incoming requests until all processes have received the
-      //    answer to all requests
-      while (!check_global_state())
-        answer_requests();
-
-      // 5) process the answer to all requests
-      clean_up_and_end_communication();
-    }
-
-
-
-    template <typename T1, typename T2>
-    bool
-    ConsensusAlgorithm_NBX<T1, T2>::check_own_state()
-    {
-#ifdef DEAL_II_WITH_MPI
-      int        all_receive_requests_are_done;
-      const auto ierr = MPI_Testall(recv_requests.size(),
-                                    recv_requests.data(),
-                                    &all_receive_requests_are_done,
-                                    MPI_STATUSES_IGNORE);
-      AssertThrowMPI(ierr);
-
-      return all_receive_requests_are_done;
-#else
-      return true;
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_NBX<T1, T2>::signal_finish()
-    {
-#ifdef DEAL_II_WITH_MPI
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
-      const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
-      AssertThrowMPI(ierr);
-#  else
-      AssertThrow(
-        false,
-        ExcMessage(
-          "ConsensusAlgorithm_NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
-#  endif
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    bool
-    ConsensusAlgorithm_NBX<T1, T2>::check_global_state()
-    {
-#ifdef DEAL_II_WITH_MPI
-      int        all_ranks_reached_barrier;
-      const auto ierr = MPI_Test(&barrier_request,
-                                 &all_ranks_reached_barrier,
-                                 MPI_STATUSES_IGNORE);
-      AssertThrowMPI(ierr);
-      return all_ranks_reached_barrier;
-#else
-      return true;
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_NBX<T1, T2>::answer_requests()
-    {
-#ifdef DEAL_II_WITH_MPI
-
-      const int tag_request =
-        Utilities::MPI::internal::Tags::consensus_algorithm_nbx_answer_request;
-      const int tag_deliver =
-        Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_deliver;
-
-      // check if there is a request pending
-      MPI_Status status;
-      int        request_is_pending;
-      const auto ierr = MPI_Iprobe(
-        MPI_ANY_SOURCE, tag_request, this->comm, &request_is_pending, &status);
-      AssertThrowMPI(ierr);
-
-      if (request_is_pending) // request is pending
-        {
-          // get rank of requesting process
-          const auto other_rank = status.MPI_SOURCE;
-
-#  ifdef DEBUG
-          Assert(requesting_processes.find(other_rank) ==
-                   requesting_processes.end(),
-                 ExcMessage("Process is requesting a second time!"));
-          requesting_processes.insert(other_rank);
-#  endif
-
-          std::vector<T1> buffer_recv;
-          // get size of of incoming message
-          int  number_amount;
-          auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
-          AssertThrowMPI(ierr);
-
-          // allocate memory for incoming message
-          Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
-          buffer_recv.resize(number_amount / sizeof(T1));
-          ierr = MPI_Recv(buffer_recv.data(),
-                          number_amount,
-                          MPI_BYTE,
-                          other_rank,
-                          tag_request,
-                          this->comm,
-                          &status);
-          AssertThrowMPI(ierr);
-
-          // allocate memory for answer message
-          request_buffers.emplace_back(
-            std_cxx14::make_unique<std::vector<T2>>());
-          request_requests.emplace_back(std_cxx14::make_unique<MPI_Request>());
-
-          // process request
-          auto &request_buffer = *request_buffers.back();
-          this->process.answer_request(other_rank, buffer_recv, request_buffer);
-
-          // start to send answer back
-          ierr = MPI_Isend(request_buffer.data(),
-                           request_buffer.size() * sizeof(T2),
-                           MPI_BYTE,
-                           other_rank,
-                           tag_deliver,
-                           this->comm,
-                           request_requests.back().get());
-          AssertThrowMPI(ierr);
-        }
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_NBX<T1, T2>::start_communication()
-    {
-#ifdef DEAL_II_WITH_MPI
-      // 1)
-      targets              = this->process.compute_targets();
-      const auto n_targets = targets.size();
-
-      const int tag_request =
-        Utilities::MPI::internal::Tags::consensus_algorithm_nbx_answer_request;
-      const int tag_deliver =
-        Utilities::MPI::internal::Tags::consensus_algorithm_nbx_process_deliver;
-
-      // 2) allocate memory
-      recv_buffers.resize(n_targets);
-      recv_requests.resize(n_targets);
-      send_requests.resize(n_targets);
-      send_buffers.resize(n_targets);
-
-      {
-        // 4) send and receive
-        for (unsigned int i = 0; i < n_targets; i++)
-          {
-            const unsigned int rank  = targets[i];
-            const unsigned int index = i;
-
-            // translate index set to a list of pairs
-            auto &send_buffer = send_buffers[index];
-            this->process.create_request(rank, send_buffer);
-
-            // start to send data
-            auto ierr = MPI_Isend(send_buffer.data(),
-                                  send_buffer.size() * sizeof(T1),
-                                  MPI_BYTE,
-                                  rank,
-                                  tag_request,
-                                  this->comm,
-                                  &send_requests[index]);
-            AssertThrowMPI(ierr);
-
-            // start to receive data
-            auto &recv_buffer = recv_buffers[index];
-            this->process.prepare_buffer_for_answer(rank, recv_buffer);
-            ierr = MPI_Irecv(recv_buffer.data(),
-                             recv_buffer.size() * sizeof(T2),
-                             MPI_BYTE,
-                             rank,
-                             tag_deliver,
-                             this->comm,
-                             &recv_requests[index]);
-            AssertThrowMPI(ierr);
-          }
-      }
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_NBX<T1, T2>::clean_up_and_end_communication()
-    {
-#ifdef DEAL_II_WITH_MPI
-      // clean up
-      {
-        if (send_requests.size() > 0)
-          {
-            const int ierr = MPI_Waitall(send_requests.size(),
-                                         send_requests.data(),
-                                         MPI_STATUSES_IGNORE);
-            AssertThrowMPI(ierr);
-          }
-
-        if (recv_requests.size() > 0)
-          {
-            const int ierr = MPI_Waitall(recv_requests.size(),
-                                         recv_requests.data(),
-                                         MPI_STATUSES_IGNORE);
-            AssertThrowMPI(ierr);
-          }
-
-
-        const int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
-        AssertThrowMPI(ierr);
-
-        for (auto &i : request_requests)
-          {
-            const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
-            AssertThrowMPI(ierr);
-          }
-
-#  ifdef DEBUG
-        // note: IBarrier seems to make problem during testing, this additional
-        // Barrier seems to help
-        MPI_Barrier(this->comm);
-#  endif
-      }
-
-      // unpack data
-      {
-        for (unsigned int i = 0; i < targets.size(); i++)
-          this->process.read_answer(targets[i], recv_buffers[i]);
-      }
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    ConsensusAlgorithm_PEX<T1, T2>::ConsensusAlgorithm_PEX(
-      ConsensusAlgorithmProcess<T1, T2> &process,
-      const MPI_Comm &                   comm)
-      : ConsensusAlgorithm<T1, T2>(process, comm)
-    {}
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_PEX<T1, T2>::run()
-    {
-      static CollectiveMutex      mutex;
-      CollectiveMutex::ScopedLock lock(mutex, this->comm);
-
-      // 1) send requests and start receiving the answers
-      //    especially determine how many requests are expected
-      const unsigned int n_requests = start_communication();
-
-      // 2) answer requests
-      for (unsigned int request = 0; request < n_requests; request++)
-        answer_requests(request);
-
-      // 3) process answers
-      clean_up_and_end_communication();
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_PEX<T1, T2>::answer_requests(int index)
-    {
-#ifdef DEAL_II_WITH_MPI
-      const int tag_request =
-        Utilities::MPI::internal::Tags::consensus_algorithm_pex_answer_request;
-      const int tag_deliver =
-        Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_deliver;
-
-      MPI_Status status;
-      auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
-      AssertThrowMPI(ierr);
-
-      // get rank of incoming message
-      const auto other_rank = status.MPI_SOURCE;
-
-      std::vector<T1> buffer_recv;
-
-      // get size of incoming message
-      int number_amount;
-      ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
-      AssertThrowMPI(ierr);
-
-      // allocate memory for incoming message
-      Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
-      buffer_recv.resize(number_amount / sizeof(T1));
-      ierr = MPI_Recv(buffer_recv.data(),
-                      number_amount,
-                      MPI_BYTE,
-                      other_rank,
-                      tag_request,
-                      this->comm,
-                      &status);
-      AssertThrowMPI(ierr);
-
-      // process request
-      auto &request_buffer = requests_buffers[index];
-      this->process.answer_request(other_rank, buffer_recv, request_buffer);
-
-      // start to send answer back
-      ierr = MPI_Isend(request_buffer.data(),
-                       request_buffer.size() * sizeof(T2),
-                       MPI_BYTE,
-                       other_rank,
-                       tag_deliver,
-                       this->comm,
-                       &requests_answers[index]);
-      AssertThrowMPI(ierr);
-#else
-      (void)index;
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    unsigned int
-    ConsensusAlgorithm_PEX<T1, T2>::start_communication()
-    {
-#ifdef DEAL_II_WITH_MPI
-      // 1) determine with which processes this process wants to communicate
-      targets = this->process.compute_targets();
-
-      const int tag_request =
-        Utilities::MPI::internal::Tags::consensus_algorithm_pex_answer_request;
-      const int tag_deliver =
-        Utilities::MPI::internal::Tags::consensus_algorithm_pex_process_deliver;
-
-      // 2) determine who wants to communicate with this process
-      sources =
-        compute_point_to_point_communication_pattern(this->comm, targets);
-
-      const auto n_targets = targets.size();
-      const auto n_sources = sources.size();
-
-      // 2) allocate memory
-      recv_buffers.resize(n_targets);
-      send_buffers.resize(n_targets);
-      send_and_recv_buffers.resize(2 * n_targets);
-
-      requests_answers.resize(n_sources);
-      requests_buffers.resize(n_sources);
-
-      // 4) send and receive
-      for (unsigned int i = 0; i < n_targets; i++)
-        {
-          const unsigned int rank = targets[i];
-
-          // pack data which should be sent
-          auto &send_buffer = send_buffers[i];
-          this->process.create_request(rank, send_buffer);
-
-          // start to send data
-          auto ierr = MPI_Isend(send_buffer.data(),
-                                send_buffer.size() * sizeof(T1),
-                                MPI_BYTE,
-                                rank,
-                                tag_request,
-                                this->comm,
-                                &send_and_recv_buffers[n_targets + i]);
-          AssertThrowMPI(ierr);
-
-          // start to receive data
-          auto &recv_buffer = recv_buffers[i];
-          this->process.prepare_buffer_for_answer(rank, recv_buffer);
-          ierr = MPI_Irecv(recv_buffer.data(),
-                           recv_buffer.size() * sizeof(T2),
-                           MPI_BYTE,
-                           rank,
-                           tag_deliver,
-                           this->comm,
-                           &send_and_recv_buffers[i]);
-          AssertThrowMPI(ierr);
-        }
-
-      return sources.size();
-#else
-      return 0;
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithm_PEX<T1, T2>::clean_up_and_end_communication()
-    {
-#ifdef DEAL_II_WITH_MPI
-      // finalize all MPI_Requests
-      if (send_and_recv_buffers.size() > 0)
-        {
-          auto ierr = MPI_Waitall(send_and_recv_buffers.size(),
-                                  send_and_recv_buffers.data(),
-                                  MPI_STATUSES_IGNORE);
-          AssertThrowMPI(ierr);
-        }
-
-      if (requests_answers.size() > 0)
-        {
-          auto ierr = MPI_Waitall(requests_answers.size(),
-                                  requests_answers.data(),
-                                  MPI_STATUSES_IGNORE);
-          AssertThrowMPI(ierr);
-        }
-
-      // unpack received data
-      for (unsigned int i = 0; i < targets.size(); i++)
-        this->process.read_answer(targets[i], recv_buffers[i]);
-#endif
-    }
-
-
-
-    template <typename T1, typename T2>
-    ConsensusAlgorithmSelector<T1, T2>::ConsensusAlgorithmSelector(
-      ConsensusAlgorithmProcess<T1, T2> &process,
-      const MPI_Comm &                   comm)
-      : ConsensusAlgorithm<T1, T2>(process, comm)
-    {
-      // Depending on the number of processes we switch between implementations.
-      // We reduce the threshold for debug mode to be able to test also the
-      // non-blocking implementation. This feature is tested by:
-      // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=15.output
-#ifdef DEAL_II_WITH_MPI
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
-#    ifdef DEBUG
-      if (Utilities::MPI::n_mpi_processes(comm) > 14)
-#    else
-      if (Utilities::MPI::n_mpi_processes(comm) > 99)
-#    endif
-        consensus_algo.reset(new ConsensusAlgorithm_NBX<T1, T2>(process, comm));
-      else
-#  endif
-#endif
-        consensus_algo.reset(new ConsensusAlgorithm_PEX<T1, T2>(process, comm));
-    }
-
-
-
-    template <typename T1, typename T2>
-    void
-    ConsensusAlgorithmSelector<T1, T2>::run()
-    {
-      consensus_algo->run();
-    }
-
-
-
-  } // end of namespace MPI
-} // end of namespace Utilities
-
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
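
The implementation removed above (and re-added under the new file name) realizes the NBX idea in three phases: answer incoming requests until the answers to one's own requests have arrived, enter a non-blocking barrier, and keep answering until every rank has entered it. A stripped-down sketch of that termination pattern in plain MPI (no payload handling or error checking; the probe-and-answer step is only indicated by comments; function and variable names are made up):

#include <mpi.h>

#include <vector>

void
nbx_termination_sketch(MPI_Comm comm, std::vector<MPI_Request> &recv_requests)
{
  // 1) Answer incoming requests until the answers to all of this rank's own
  //    requests have arrived (check_own_state()/answer_requests() above).
  int own_requests_done = 0;
  while (own_requests_done == 0)
    {
      // ... MPI_Iprobe for incoming requests and answer them here ...
      MPI_Testall(static_cast<int>(recv_requests.size()),
                  recv_requests.data(),
                  &own_requests_done,
                  MPI_STATUSES_IGNORE);
    }

  // 2) Tell all other ranks that this rank is done (signal_finish() above).
  MPI_Request barrier_request;
  MPI_Ibarrier(comm, &barrier_request);

  // 3) Keep answering requests until all ranks have entered the barrier
  //    (check_global_state() above).
  int all_ranks_done = 0;
  while (all_ranks_done == 0)
    {
      // ... MPI_Iprobe for incoming requests and answer them here ...
      MPI_Test(&barrier_request, &all_ranks_done, MPI_STATUS_IGNORE);
    }
}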
diff --git a/include/deal.II/base/mpi_consensus_algorithms.h b/include/deal.II/base/mpi_consensus_algorithms.h
new file mode 100644 (file)
index 0000000..40984b2
--- /dev/null
@@ -0,0 +1,665 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_mpi_consensus_algorithm_h
+#define dealii_mpi_consensus_algorithm_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Utilities
+{
+  namespace MPI
+  {
+    /**
+     * A namespace for consensus algorithms designed for dynamic-sparse
+     * communication patterns.
+     *
+     * @ingroup MPI
+     */
+    namespace ConsensusAlgorithms
+    {
+      /**
+       * An interface to be used together with the Interface classes. The
+       * main functionality of an implementation is to return a list of
+       * process ranks this process wants data from, and to deal with the
+       * optional payload of the messages sent/received by the classes in
+       * the ConsensusAlgorithms namespace.
+       *
+       * There are two kinds of messages:
+       * - send/request message: A message consisting of a data request
+       *   which should be answered by another process. This message is
+       *   considered as a request message by the receiving rank.
+       * - recv message: The answer to a send/request message.
+       *
+       * @tparam T1 The type of the elements of the vector to be sent.
+       * @tparam T2 The type of the elements of the vector to be received.
+       *
+       * @note Since the payloads of the messages are optional, users have
+       *       to deal with the buffers themselves. The ConsensusAlgorithms
+       *       classes 1) deliver only references to empty vectors (of size 0)
+       *       into which the data to be sent can be inserted or from which
+       *       received data can be read, and 2) communicate these vectors
+       *       blindly.
+       *
+       * @author Peter Munch, 2019
+       */
+      template <typename T1, typename T2>
+      class Process
+      {
+      public:
+        /**
+         * Destructor.
+         */
+        virtual ~Process() = default;
+
+        /**
+         * @return A vector of ranks this process wants to send a request to.
+         *
+         * @note This is the only method which has to be implemented since the
+         *       payloads of the messages are optional.
+         */
+        virtual std::vector<unsigned int>
+        compute_targets() = 0;
+
+        /**
+         * Add a payload to the request sent to the process with the
+         * specified rank.
+         *
+         * @param[in]  other_rank rank of the process
+         * @param[out] send_buffer data to be sent as part of the request
+         *             (optional)
+         *
+         * @note The buffer is empty. Before using it, you have to set its size.
+         */
+        virtual void
+        create_request(const unsigned int other_rank,
+                       std::vector<T1> &  send_buffer);
+
+        /**
+         * Prepare the buffer in which the payload of the answer to the
+         * request sent to the process with the specified rank will be saved.
+         * The most obvious task is to resize the buffer, since it is empty
+         * when the function is called.
+         *
+         * @param[in]  other_rank rank of the process
+         * @param[out] recv_buffer buffer in which the answer will be received
+         *             (optional)
+         */
+        virtual void
+        prepare_buffer_for_answer(const unsigned int other_rank,
+                                  std::vector<T2> &  recv_buffer);
+
+        /**
+         * Prepare the buffer where the payload of the answer of the request to
+         * the process with the specified rank is saved in.
+         *
+         * @param[in]  other_rank rank of the process
+         * @param[in]  buffer_recv received payload (optional)
+         * @param[out] request_buffer payload to be sent as part of the request
+         *             (optional)
+         *
+         * @note The request_buffer is empty. Before using it, you have to set
+         *       its size.
+         */
+        virtual void
+        answer_request(const unsigned int     other_rank,
+                       const std::vector<T1> &buffer_recv,
+                       std::vector<T2> &      request_buffer);
+
+        /**
+         * Process the payload of the answer to the request sent to the
+         * process with the specified rank.
+         *
+         * @param[in] other_rank rank of the process
+         * @param[in] recv_buffer received payload of the answer (optional)
+         */
+        virtual void
+        read_answer(const unsigned int     other_rank,
+                    const std::vector<T2> &recv_buffer);
+      };
+
+
+
+      /**
+       * A base class for algorithms that implement the task of coming up with
+       * communication patterns to retrieve data from other processes in a
+       * dynamic-sparse way. In computer science, this is often called a
+       * <a href="https://en.wikipedia.org/wiki/Consensus_algorithm">consensus
+       * problem</a>.
+       *
+       * Dynamic-sparse means in this context:
+       * - By the time this function is called, the other processes do
+       *   not know yet that they have to answer requests.
+       * - Each process only has to communicate with a small subset of
+       *   processes of the MPI communicator.
+       *
+       * Naturally, the user has to provide:
+       * - A communicator.
+       * - For each rank, a list of ranks of processes this process should
+       *   communicate with.
+       * - Functionality to pack/unpack data to be sent/received.
+       *
+       * This base class only introduces a basic interface to achieve
+       * these goals, while derived classes implement different algorithms
+       * to actually compute such communication patterns.
+       * The last two items of the list above are implemented in classes
+       * derived from ConsensusAlgorithms::Process.
+       *
+       * @tparam T1 The type of the elements of the vector to be sent.
+       * @tparam T2 The type of the elements of the vector to be received.
+       *
+       * @author Peter Munch, 2019
+       */
+      template <typename T1, typename T2>
+      class Interface
+      {
+      public:
+        Interface(Process<T1, T2> &process, const MPI_Comm &comm);
+
+        /**
+         * Destructor.
+         */
+        virtual ~Interface() = default;
+
+        /**
+         * Run consensus algorithm.
+         */
+        virtual void
+        run() = 0;
+
+      protected:
+        /**
+         * Reference to the process provided by the user.
+         */
+        Process<T1, T2> &process;
+
+        /**
+         * MPI communicator.
+         */
+        const MPI_Comm &comm;
+
+        /**
+         * Rank of this process.
+         */
+        const unsigned int my_rank;
+
+        /**
+         * Number of processes in the communicator.
+         */
+        const unsigned int n_procs;
+      };
+
+
+      /**
+       * This class implements a concrete algorithm for the
+       * ConsensusAlgorithms::Interface base class, using only point-to-point
+       * communications and a single IBarrier.
+       *
+       * @note This class closely follows the paper Hoefler, Siebert, Lumsdaine
+       *       "Scalable Communication Protocols for Dynamic Sparse Data
+       *       Exchange". Since the algorithm shown there is not considering
+       *       payloads, the algorithm has been modified here in such a way that
+       *       synchronous sends (Issend) have been replaced by equivalent
+       *       Isend/Irecv, where Irecv receives the answer to a request (with
+       *       payload).
+       *
+       * @tparam T1 The type of the elements of the vector to be sent.
+       * @tparam T2 The type of the elements of the vector to be received.
+       *
+       * @author Peter Munch, 2019
+       */
+      template <typename T1, typename T2>
+      class NBX : public Interface<T1, T2>
+      {
+      public:
+        /**
+         * Constructor.
+         *
+         * @param process Process to be run during consensus algorithm.
+         * @param comm MPI Communicator
+         */
+        NBX(Process<T1, T2> &process, const MPI_Comm &comm);
+
+        /**
+         * Destructor.
+         */
+        virtual ~NBX() = default;
+
+        /**
+         * @copydoc Interface::run()
+         */
+        virtual void
+        run() override;
+
+      private:
+#ifdef DEAL_II_WITH_MPI
+        /**
+         * List of processes this process wants to send requests to.
+         */
+        std::vector<unsigned int> targets;
+
+        /**
+         * Buffers for sending requests.
+         */
+        std::vector<std::vector<T1>> send_buffers;
+
+        /**
+         * Requests for sending requests.
+         */
+        std::vector<MPI_Request> send_requests;
+
+        /**
+         * Buffers for receiving answers to requests.
+         */
+        std::vector<std::vector<T2>> recv_buffers;
+
+
+        /**
+         * Requests for receiving answers to requests.
+         */
+        std::vector<MPI_Request> recv_requests;
+
+        /**
+         * Buffers for sending answers to requests.
+         */
+        std::vector<std::unique_ptr<std::vector<T2>>> request_buffers;
+
+        /**
+         * Requests for sending answers to requests.
+         */
+        std::vector<std::unique_ptr<MPI_Request>> request_requests;
+
+        /**
+         * Request for the IBarrier.
+         */
+        MPI_Request barrier_request;
+#endif
+
+#ifdef DEBUG
+        /**
+         * List of processes who have made a request to this process.
+         */
+        std::set<unsigned int> requesting_processes;
+#endif
+
+        /**
+         * Check if all request answers have been received by this rank.
+         */
+        bool
+        check_own_state();
+
+        /**
+         * Signal to all other ranks that this rank has received all request
+         * answers by entering the IBarrier.
+         */
+        void
+        signal_finish();
+
+        /**
+         * Check if all ranks have received all their request answers, i.e.
+         * all ranks have reached the IBarrier.
+         */
+        bool
+        check_global_state();
+
+        /**
+         * A request message from another rank has been received: process the
+         * request and send an answer.
+         */
+        void
+        answer_requests();
+
+        /**
+         * Start to send all requests via ISend and post IRecvs for the incoming
+         * answer messages.
+         */
+        void
+        start_communication();
+
+        /**
+         * After all ranks have received all answers, the MPI data structures
+         * can be freed and the received answers can be processed.
+         */
+        void
+        clean_up_and_end_communication();
+      };
+
+      /**
+       * This class implements a concrete algorithm for the
+       * ConsensusAlgorithms::Interface base class, using a two step approach.
+       * In the first step the source ranks are determined and in the second
+       * step a static sparse data exchange is performed.
+       *
+       * @note In contrast to NBX, this class splits the same task into two
+       *       distinct steps. In the first step, all processes that want to
+       *       send a request to this process are identified. In the second
+       *       step, the data is exchanged. Since it is known in the second
+       *       step how many requests have to be answered, i.e., when this
+       *       process can stop waiting for requests, no IBarrier is needed.
+       *
+       * @note The function
+       *       Utilities::MPI::compute_point_to_point_communication_pattern()
+       *       is used to determine the source processes; it implements the
+       *       PEX algorithm of Hoefler et al., "Scalable Communication
+       *       Protocols for Dynamic Sparse Data Exchange".
+       *
+       * @tparam T1 The type of the elements of the vector to be sent.
+       * @tparam T2 The type of the elements of the vector to be received.
+       *
+       * @author Peter Munch, 2019
+       */
+      template <typename T1, typename T2>
+      class PEX : public Interface<T1, T2>
+      {
+      public:
+        /**
+         * Constructor.
+         *
+         * @param process Process to be run during consensus algorithm.
+         * @param comm MPI Communicator
+         */
+        PEX(Process<T1, T2> &process, const MPI_Comm &comm);
+
+        /**
+         * Destructor.
+         */
+        virtual ~PEX() = default;
+
+        /**
+         * @copydoc Interface::run()
+         */
+        virtual void
+        run() override;
+
+      private:
+#ifdef DEAL_II_WITH_MPI
+        /**
+         * List of ranks of processes this process wants to send a request to.
+         */
+        std::vector<unsigned int> targets;
+
+        /**
+         * List of ranks of processes wanting to send a request to this process.
+         */
+        std::vector<unsigned int> sources;
+
+        // data structures to send and receive requests
+
+        /**
+         * Buffers for sending requests.
+         */
+        std::vector<std::vector<T1>> send_buffers;
+
+        /**
+         * Buffers for receiving answers to requests.
+         */
+        std::vector<std::vector<T2>> recv_buffers;
+
+        /**
+         * Requests for sending requests and receiving answers to requests.
+         */
+        std::vector<MPI_Request> send_and_recv_buffers;
+
+        /**
+         * Buffers for sending answers to requests.
+         */
+        std::vector<std::vector<T2>> requests_buffers;
+
+        /**
+         * Requests for sending answers to requests.
+         */
+        std::vector<MPI_Request> requests_answers;
+#endif
+
+        /**
+         * The ith request message from another rank has been received: process
+         * the request and send an answer.
+         */
+        void
+        answer_requests(int index);
+
+        /**
+         * Start to send all requests via ISend and post IRecvs for the incoming
+         * answer messages.
+         */
+        unsigned int
+        start_communication();
+
+        /**
+         * After all answers have been exchanged, the MPI data structures can be
+         * freed and the received answers can be processed.
+         */
+        void
+        clean_up_and_end_communication();
+      };
+
+      /**
+       * A class which delegates its task to other
+       * ConsensusAlgorithms::Interface implementations depending on the number
+       * of processes in the MPI communicator. For a small number of processes
+       * it uses PEX and for a large number of processes NBX. The threshold
+       * depends on whether the program is compiled in debug or release mode.
+       *
+       * @tparam T1 The type of the elements of the vector to be sent.
+       * @tparam T2 The type of the elements of the vector to be received.
+       *
+       * @author Peter Munch, 2019
+       */
+      template <typename T1, typename T2>
+      class Selector : public Interface<T1, T2>
+      {
+      public:
+        /**
+         * Constructor.
+         *
+         * @param process Process to be run during consensus algorithm.
+         * @param comm MPI Communicator.
+         */
+        Selector(Process<T1, T2> &process, const MPI_Comm &comm);
+
+        /**
+         * Destructor.
+         */
+        virtual ~Selector() = default;
+
+        /**
+         * @copydoc Interface::run()
+         *
+         * @note The function call is delegated to another ConsensusAlgorithms::Interface implementation.
+         */
+        virtual void
+        run() override;
+
+      private:
+        /**
+         * Pointer to the actual ConsensusAlgorithms::Interface implementation.
+         */
+        std::shared_ptr<Interface<T1, T2>> consensus_algo;
+      };
+
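Since NBX, PEX, and Selector all derive from Interface, they can be used interchangeably; only the way the communication pattern is negotiated differs. A minimal sketch of this idea follows (the helper name run_consensus is illustrative; for template arguments other than the pairs explicitly instantiated in mpi_consensus_algorithms.cc, the corresponding .templates.h header would have to be included as well):

#include <deal.II/base/mpi_consensus_algorithms.h>

// Run a user-provided Process through the Selector, which forwards run() to
// NBX or PEX depending on the number of MPI processes. Replacing Selector by
// NBX or PEX below changes the protocol, not the result.
template <typename T1, typename T2>
void
run_consensus(
  dealii::Utilities::MPI::ConsensusAlgorithms::Process<T1, T2> &process,
  const MPI_Comm &                                              comm)
{
  dealii::Utilities::MPI::ConsensusAlgorithms::Selector<T1, T2>(process, comm)
    .run();
}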
+      /**
+       * This class implements Utilities::MPI::ConsensusAlgorithms::Process,
+       * using user-provided function wrappers.
+       * The advantage of this class is that users do not have to write their
+       * own implementation but can register lambda functions directly.
+       */
+      template <typename T1, typename T2>
+      class AnonymousProcess : public Process<T1, T2>
+      {
+      public:
+        /**
+         * Register functions that should be called for implementing the
+         * interface of Process.
+         *
+         * @param function_compute_targets called during `compute_targets`.
+         * @param function_create_request called during `create_request`.
+         * @param function_answer_request called during `answer_request`.
+         * @param function_prepare_buffer_for_answer called during
+         *   `prepare_buffer_for_answer`.
+         * @param function_read_answer called during `read_answer`.
+         */
+        AnonymousProcess(
+          const std::function<std::vector<unsigned int>()>
+            &function_compute_targets,
+          const std::function<void(const unsigned int, std::vector<T1> &)>
+            &function_create_request =
+              [](const unsigned int, std::vector<T1> &) {},
+          const std::function<void(const unsigned int,
+                                   const std::vector<T1> &,
+                                   std::vector<T2> &)>
+            &function_answer_request = [](const unsigned int,
+                                          const std::vector<T1> &,
+                                          std::vector<T2> &) {},
+          const std::function<void(const unsigned int, std::vector<T2> &)>
+            &function_prepare_buffer_for_answer =
+              [](const unsigned int, std::vector<T2> &) {},
+          const std::function<void(const unsigned int, const std::vector<T2> &)>
+            &function_read_answer =
+              [](const unsigned int, const std::vector<T2> &) {});
+
+        /**
+         * @copydoc Process::compute_targets()
+         */
+        std::vector<unsigned int>
+        compute_targets() override;
+
+        /**
+         * @copydoc Process::create_request()
+         */
+        void
+        create_request(const unsigned int other_rank,
+                       std::vector<T1> &  send_buffer) override;
+
+        /**
+         * @copydoc Process::answer_request()
+         */
+        void
+        answer_request(const unsigned int     other_rank,
+                       const std::vector<T1> &buffer_recv,
+                       std::vector<T2> &      request_buffer) override;
+
+        /**
+         * @copydoc Process::prepare_buffer_for_answer()
+         */
+        void
+        prepare_buffer_for_answer(const unsigned int other_rank,
+                                  std::vector<T2> &  recv_buffer) override;
+
+        /**
+         * @copydoc Process::read_answer()
+         */
+        void
+        read_answer(const unsigned int     other_rank,
+                    const std::vector<T2> &recv_buffer) override;
+
+      private:
+        const std::function<std::vector<unsigned int>()>
+          function_compute_targets;
+        const std::function<void(const int, std::vector<T1> &)>
+          function_create_request;
+        const std::function<
+          void(const unsigned int, const std::vector<T1> &, std::vector<T2> &)>
+          function_answer_request;
+        const std::function<void(const int, std::vector<T2> &)>
+          function_prepare_buffer_for_answer;
+        const std::function<void(const int, const std::vector<T2> &)>
+          function_read_answer;
+      };
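As a usage sketch (paralleling the updated test tests/base/consensus_algorithm_01.cc at the end of this patch; the function name ring_exchange is illustrative), every rank asks its right neighbor for its rank number using lambdas only:

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_consensus_algorithms.h>

#include <vector>

void
ring_exchange(const MPI_Comm &comm)
{
  using namespace dealii;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  Utilities::MPI::ConsensusAlgorithms::AnonymousProcess<unsigned int,
                                                        unsigned int>
    process(
      // compute_targets: ask the right neighbor
      [&]() { return std::vector<unsigned int>{(my_rank + 1) % n_ranks}; },
      // create_request: the payload is this rank's number
      [&](const unsigned int, std::vector<unsigned int> &send_buffer) {
        send_buffer.push_back(my_rank);
      },
      // answer_request: reply with this rank's number
      [&](const unsigned int,
          const std::vector<unsigned int> &,
          std::vector<unsigned int> &request_buffer) {
        request_buffer.push_back(my_rank);
      },
      // prepare_buffer_for_answer: the answer is a single number
      [&](const unsigned int, std::vector<unsigned int> &recv_buffer) {
        recv_buffer.resize(1);
      },
      // read_answer: recv_buffer[0] is the neighbor's rank
      [&](const unsigned int, const std::vector<unsigned int> &) {});

  Utilities::MPI::ConsensusAlgorithms::Selector<unsigned int, unsigned int>(
    process, comm)
    .run();
}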
+
+
+
+      template <typename T1, typename T2>
+      AnonymousProcess<T1, T2>::AnonymousProcess(
+        const std::function<std::vector<unsigned int>()>
+          &function_compute_targets,
+        const std::function<void(const unsigned int, std::vector<T1> &)>
+          &                                           function_create_request,
+        const std::function<void(const unsigned int,
+                                 const std::vector<T1> &,
+                                 std::vector<T2> &)> &function_answer_request,
+        const std::function<void(const unsigned int, std::vector<T2> &)>
+          &function_prepare_buffer_for_answer,
+        const std::function<void(const unsigned int, const std::vector<T2> &)>
+          &function_read_answer)
+        : function_compute_targets(function_compute_targets)
+        , function_create_request(function_create_request)
+        , function_answer_request(function_answer_request)
+        , function_prepare_buffer_for_answer(function_prepare_buffer_for_answer)
+        , function_read_answer(function_read_answer)
+      {}
+
+
+
+      template <typename T1, typename T2>
+      std::vector<unsigned int>
+      AnonymousProcess<T1, T2>::compute_targets()
+      {
+        return function_compute_targets();
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      AnonymousProcess<T1, T2>::create_request(const unsigned int other_rank,
+                                               std::vector<T1> &  send_buffer)
+      {
+        function_create_request(other_rank, send_buffer);
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      AnonymousProcess<T1, T2>::answer_request(
+        const unsigned int     other_rank,
+        const std::vector<T1> &buffer_recv,
+        std::vector<T2> &      request_buffer)
+      {
+        function_answer_request(other_rank, buffer_recv, request_buffer);
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      AnonymousProcess<T1, T2>::prepare_buffer_for_answer(
+        const unsigned int other_rank,
+        std::vector<T2> &  recv_buffer)
+      {
+        function_prepare_buffer_for_answer(other_rank, recv_buffer);
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      AnonymousProcess<T1, T2>::read_answer(const unsigned int     other_rank,
+                                            const std::vector<T2> &recv_buffer)
+      {
+        function_read_answer(other_rank, recv_buffer);
+      }
+
+
+
+    } // namespace ConsensusAlgorithms
+  }   // end of namespace MPI
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/include/deal.II/base/mpi_consensus_algorithms.templates.h b/include/deal.II/base/mpi_consensus_algorithms.templates.h
new file mode 100644 (file)
index 0000000..ab68ab1
--- /dev/null
@@ -0,0 +1,597 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_mpi_consensus_algorithm_templates_h
+#define dealii_mpi_consensus_algorithm_templates_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/exceptions.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi_consensus_algorithms.h>
+#include <deal.II/base/symmetric_tensor.h>
+#include <deal.II/base/tensor.h>
+
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/lapack_full_matrix.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/vector.h>
+
+#include <vector>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+  namespace MPI
+  {
+    namespace ConsensusAlgorithms
+    {
+      template <typename T1, typename T2>
+      void
+      Process<T1, T2>::answer_request(const unsigned int,
+                                      const std::vector<T1> &,
+                                      std::vector<T2> &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      Process<T1, T2>::create_request(const unsigned int, std::vector<T1> &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      Process<T1, T2>::prepare_buffer_for_answer(const unsigned int,
+                                                 std::vector<T2> &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      Process<T1, T2>::read_answer(const unsigned int, const std::vector<T2> &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename T1, typename T2>
+      Interface<T1, T2>::Interface(Process<T1, T2> &process,
+                                   const MPI_Comm & comm)
+        : process(process)
+        , comm(comm)
+        , my_rank(this_mpi_process(comm))
+        , n_procs(n_mpi_processes(comm))
+      {}
+
+
+
+      template <typename T1, typename T2>
+      NBX<T1, T2>::NBX(Process<T1, T2> &process, const MPI_Comm &comm)
+        : Interface<T1, T2>(process, comm)
+      {}
+
+
+
+      template <typename T1, typename T2>
+      void
+      NBX<T1, T2>::run()
+      {
+        static CollectiveMutex      mutex;
+        CollectiveMutex::ScopedLock lock(mutex, this->comm);
+
+        // 1) send requests and start receiving the answers
+        start_communication();
+
+        // 2) answer requests and check if all requests of this process have
+        //    been answered
+        while (!check_own_state())
+          answer_requests();
+
+        // 3) signal to all other processes that all requests of this process
+        //    have been answered
+        signal_finish();
+
+        // 4) nevertheless, this process has to keep on answering (potential)
+        //    incoming requests until all processes have received the
+        //    answer to all requests
+        while (!check_global_state())
+          answer_requests();
+
+        // 5) process the answer to all requests
+        clean_up_and_end_communication();
+      }
+
+
+
+      template <typename T1, typename T2>
+      bool
+      NBX<T1, T2>::check_own_state()
+      {
+#ifdef DEAL_II_WITH_MPI
+        int        all_receive_requests_are_done;
+        const auto ierr = MPI_Testall(recv_requests.size(),
+                                      recv_requests.data(),
+                                      &all_receive_requests_are_done,
+                                      MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+
+        return all_receive_requests_are_done;
+#else
+        return true;
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      NBX<T1, T2>::signal_finish()
+      {
+#ifdef DEAL_II_WITH_MPI
+#  if DEAL_II_MPI_VERSION_GTE(3, 0)
+        const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
+        AssertThrowMPI(ierr);
+#  else
+        AssertThrow(
+          false,
+          ExcMessage(
+            "ConsensusAlgorithms::NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
+#  endif
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      bool
+      NBX<T1, T2>::check_global_state()
+      {
+#ifdef DEAL_II_WITH_MPI
+        int        all_ranks_reached_barrier;
+        const auto ierr = MPI_Test(&barrier_request,
+                                   &all_ranks_reached_barrier,
+                                   MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+        return all_ranks_reached_barrier;
+#else
+        return true;
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      NBX<T1, T2>::answer_requests()
+      {
+#ifdef DEAL_II_WITH_MPI
+
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_answer_request;
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_process_deliver;
+
+        // check if there is a request pending
+        MPI_Status status;
+        int        request_is_pending;
+        const auto ierr = MPI_Iprobe(MPI_ANY_SOURCE,
+                                     tag_request,
+                                     this->comm,
+                                     &request_is_pending,
+                                     &status);
+        AssertThrowMPI(ierr);
+
+        if (request_is_pending) // request is pending
+          {
+            // get rank of requesting process
+            const auto other_rank = status.MPI_SOURCE;
+
+#  ifdef DEBUG
+            Assert(requesting_processes.find(other_rank) ==
+                     requesting_processes.end(),
+                   ExcMessage("Process is requesting a second time!"));
+            requesting_processes.insert(other_rank);
+#  endif
+
+            std::vector<T1> buffer_recv;
+            // get size of incoming message
+            int  number_amount;
+            auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
+            AssertThrowMPI(ierr);
+
+            // allocate memory for incoming message
+            Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
+            buffer_recv.resize(number_amount / sizeof(T1));
+            ierr = MPI_Recv(buffer_recv.data(),
+                            number_amount,
+                            MPI_BYTE,
+                            other_rank,
+                            tag_request,
+                            this->comm,
+                            &status);
+            AssertThrowMPI(ierr);
+
+            // allocate memory for answer message
+            request_buffers.emplace_back(
+              std_cxx14::make_unique<std::vector<T2>>());
+            request_requests.emplace_back(
+              std_cxx14::make_unique<MPI_Request>());
+
+            // process request
+            auto &request_buffer = *request_buffers.back();
+            this->process.answer_request(other_rank,
+                                         buffer_recv,
+                                         request_buffer);
+
+            // start to send answer back
+            ierr = MPI_Isend(request_buffer.data(),
+                             request_buffer.size() * sizeof(T2),
+                             MPI_BYTE,
+                             other_rank,
+                             tag_deliver,
+                             this->comm,
+                             request_requests.back().get());
+            AssertThrowMPI(ierr);
+          }
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      NBX<T1, T2>::start_communication()
+      {
+#ifdef DEAL_II_WITH_MPI
+        // 1) determine with which processes this process wants to communicate
+        targets              = this->process.compute_targets();
+        const auto n_targets = targets.size();
+
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_answer_request;
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_process_deliver;
+
+        // 2) allocate memory
+        recv_buffers.resize(n_targets);
+        recv_requests.resize(n_targets);
+        send_requests.resize(n_targets);
+        send_buffers.resize(n_targets);
+
+        {
+          // 3) send and receive
+          for (unsigned int i = 0; i < n_targets; i++)
+            {
+              const unsigned int rank  = targets[i];
+              const unsigned int index = i;
+
+              // translate index set to a list of pairs
+              auto &send_buffer = send_buffers[index];
+              this->process.create_request(rank, send_buffer);
+
+              // start to send data
+              auto ierr = MPI_Isend(send_buffer.data(),
+                                    send_buffer.size() * sizeof(T1),
+                                    MPI_BYTE,
+                                    rank,
+                                    tag_request,
+                                    this->comm,
+                                    &send_requests[index]);
+              AssertThrowMPI(ierr);
+
+              // start to receive data
+              auto &recv_buffer = recv_buffers[index];
+              this->process.prepare_buffer_for_answer(rank, recv_buffer);
+              ierr = MPI_Irecv(recv_buffer.data(),
+                               recv_buffer.size() * sizeof(T2),
+                               MPI_BYTE,
+                               rank,
+                               tag_deliver,
+                               this->comm,
+                               &recv_requests[index]);
+              AssertThrowMPI(ierr);
+            }
+        }
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      NBX<T1, T2>::clean_up_and_end_communication()
+      {
+#ifdef DEAL_II_WITH_MPI
+        // clean up
+        {
+          if (send_requests.size() > 0)
+            {
+              const int ierr = MPI_Waitall(send_requests.size(),
+                                           send_requests.data(),
+                                           MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+          if (recv_requests.size() > 0)
+            {
+              const int ierr = MPI_Waitall(recv_requests.size(),
+                                           recv_requests.data(),
+                                           MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+
+          const int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          for (auto &i : request_requests)
+            {
+              const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+#  ifdef DEBUG
+          // note: IBarrier seems to cause problems during testing; this
+          // additional Barrier seems to help
+          MPI_Barrier(this->comm);
+#  endif
+        }
+
+        // unpack data
+        {
+          for (unsigned int i = 0; i < targets.size(); i++)
+            this->process.read_answer(targets[i], recv_buffers[i]);
+        }
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      PEX<T1, T2>::PEX(Process<T1, T2> &process, const MPI_Comm &comm)
+        : Interface<T1, T2>(process, comm)
+      {}
+
+
+
+      template <typename T1, typename T2>
+      void
+      PEX<T1, T2>::run()
+      {
+        static CollectiveMutex      mutex;
+        CollectiveMutex::ScopedLock lock(mutex, this->comm);
+
+        // 1) send requests and start receiving the answers
+        //    especially determine how many requests are expected
+        const unsigned int n_requests = start_communication();
+
+        // 2) answer requests
+        for (unsigned int request = 0; request < n_requests; request++)
+          answer_requests(request);
+
+        // 3) process answers
+        clean_up_and_end_communication();
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      PEX<T1, T2>::answer_requests(int index)
+      {
+#ifdef DEAL_II_WITH_MPI
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_answer_request;
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_process_deliver;
+
+        MPI_Status status;
+        auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
+        AssertThrowMPI(ierr);
+
+        // get rank of incoming message
+        const auto other_rank = status.MPI_SOURCE;
+
+        std::vector<T1> buffer_recv;
+
+        // get size of incoming message
+        int number_amount;
+        ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
+        AssertThrowMPI(ierr);
+
+        // allocate memory for incoming message
+        Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
+        buffer_recv.resize(number_amount / sizeof(T1));
+        ierr = MPI_Recv(buffer_recv.data(),
+                        number_amount,
+                        MPI_BYTE,
+                        other_rank,
+                        tag_request,
+                        this->comm,
+                        &status);
+        AssertThrowMPI(ierr);
+
+        // process request
+        auto &request_buffer = requests_buffers[index];
+        this->process.answer_request(other_rank, buffer_recv, request_buffer);
+
+        // start to send answer back
+        ierr = MPI_Isend(request_buffer.data(),
+                         request_buffer.size() * sizeof(T2),
+                         MPI_BYTE,
+                         other_rank,
+                         tag_deliver,
+                         this->comm,
+                         &requests_answers[index]);
+        AssertThrowMPI(ierr);
+#else
+        (void)index;
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      unsigned int
+      PEX<T1, T2>::start_communication()
+      {
+#ifdef DEAL_II_WITH_MPI
+        // 1) determine with which processes this process wants to communicate
+        targets = this->process.compute_targets();
+
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_answer_request;
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_process_deliver;
+
+        // 2) determine who wants to communicate with this process
+        sources =
+          compute_point_to_point_communication_pattern(this->comm, targets);
+
+        const auto n_targets = targets.size();
+        const auto n_sources = sources.size();
+
+        // 3) allocate memory
+        recv_buffers.resize(n_targets);
+        send_buffers.resize(n_targets);
+        send_and_recv_buffers.resize(2 * n_targets);
+
+        requests_answers.resize(n_sources);
+        requests_buffers.resize(n_sources);
+
+        // 4) send and receive
+        for (unsigned int i = 0; i < n_targets; i++)
+          {
+            const unsigned int rank = targets[i];
+
+            // pack data which should be sent
+            auto &send_buffer = send_buffers[i];
+            this->process.create_request(rank, send_buffer);
+
+            // start to send data
+            auto ierr = MPI_Isend(send_buffer.data(),
+                                  send_buffer.size() * sizeof(T1),
+                                  MPI_BYTE,
+                                  rank,
+                                  tag_request,
+                                  this->comm,
+                                  &send_and_recv_buffers[n_targets + i]);
+            AssertThrowMPI(ierr);
+
+            // start to receive data
+            auto &recv_buffer = recv_buffers[i];
+            this->process.prepare_buffer_for_answer(rank, recv_buffer);
+            ierr = MPI_Irecv(recv_buffer.data(),
+                             recv_buffer.size() * sizeof(T2),
+                             MPI_BYTE,
+                             rank,
+                             tag_deliver,
+                             this->comm,
+                             &send_and_recv_buffers[i]);
+            AssertThrowMPI(ierr);
+          }
+
+        return sources.size();
+#else
+        return 0;
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      PEX<T1, T2>::clean_up_and_end_communication()
+      {
+#ifdef DEAL_II_WITH_MPI
+        // finalize all MPI_Requests
+        if (send_and_recv_buffers.size() > 0)
+          {
+            auto ierr = MPI_Waitall(send_and_recv_buffers.size(),
+                                    send_and_recv_buffers.data(),
+                                    MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
+          }
+
+        if (requests_answers.size() > 0)
+          {
+            auto ierr = MPI_Waitall(requests_answers.size(),
+                                    requests_answers.data(),
+                                    MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
+          }
+
+        // unpack received data
+        for (unsigned int i = 0; i < targets.size(); i++)
+          this->process.read_answer(targets[i], recv_buffers[i]);
+#endif
+      }
+
+
+
+      template <typename T1, typename T2>
+      Selector<T1, T2>::Selector(Process<T1, T2> &process, const MPI_Comm &comm)
+        : Interface<T1, T2>(process, comm)
+      {
+        // Depending on the number of processes we switch between
+        // implementations. We reduce the threshold for debug mode to be able to
+        // test also the non-blocking implementation. This feature is tested by:
+        // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=15.output
+#ifdef DEAL_II_WITH_MPI
+#  if DEAL_II_MPI_VERSION_GTE(3, 0)
+#    ifdef DEBUG
+        if (Utilities::MPI::n_mpi_processes(comm) > 14)
+#    else
+        if (Utilities::MPI::n_mpi_processes(comm) > 99)
+#    endif
+          consensus_algo.reset(new NBX<T1, T2>(process, comm));
+        else
+#  endif
+#endif
+          consensus_algo.reset(new PEX<T1, T2>(process, comm));
+      }
+
+
+
+      template <typename T1, typename T2>
+      void
+      Selector<T1, T2>::run()
+      {
+        consensus_algo->run();
+      }
+
+
+    } // namespace ConsensusAlgorithms
+  }   // end of namespace MPI
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
index e8b6ca134ff40bd92167bb7095c5fb121218acb7..a293636bb26c7570cb8aa2b063f6557d13dc0d35 100644 (file)
@@ -152,14 +152,14 @@ namespace Utilities
         indexset_want.n_elements());
 
       // set up dictionary
-      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmPayload
+      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
         process(indexset_has,
                 indexset_want,
                 communicator,
                 owning_ranks_of_ghosts,
                 true);
 
-      Utilities::MPI::ConsensusAlgorithmSelector<
+      Utilities::MPI::ConsensusAlgorithms::Selector<
         std::pair<types::global_dof_index, types::global_dof_index>,
         unsigned int>
         consensus_algorithm(process, communicator);
index 75ec9449387ff3fa77397d53e56576776a27947e..548df86b964efae707b7074b61a31ad09a8c62dc 100644 (file)
@@ -77,7 +77,7 @@ namespace Utilities
           /// Dictionary::reinit()
           dictionary_reinit,
 
-          /// ConsensusAlgorithmPayload::get_requesters()
+          /// ConsensusAlgorithms::Payload::get_requesters()
           consensus_algorithm_payload_get_requesters,
 
           /// FETools::extrapolate()
@@ -85,14 +85,14 @@ namespace Utilities
           /// FETools::extrapolate(), allocate space for 10 rounds:
           fe_tools_extrapolate_end = fe_tools_extrapolate + 10,
 
-          /// ConsensusAlgorithm_NBX::process
+          /// ConsensusAlgorithms::NBX::process
           consensus_algorithm_nbx_answer_request,
-          /// ConsensusAlgorithm_NBX::process
+          /// ConsensusAlgorithms::NBX::process
           consensus_algorithm_nbx_process_deliver,
 
-          /// ConsensusAlgorithm_PEX::process
+          /// ConsensusAlgorithms::PEX::process
           consensus_algorithm_pex_answer_request,
-          /// ConsensusAlgorithm_PEX::process
+          /// ConsensusAlgorithms::PEX::process
           consensus_algorithm_pex_process_deliver,
 
           /// TriangulationDescription::Utilities::create_description_from_triangulation()
index ee453a1f6e032c7bfeb014bbb30b6811727c0083..9b1b6e0f1c8a2b48ca4e79363f097d54095a3ec7 100644 (file)
@@ -45,7 +45,7 @@ SET(_unity_include_src
   logstream.cc
   hdf5.cc
   mpi.cc
-  mpi_consensus_algorithm.cc
+  mpi_consensus_algorithms.cc
   mpi_noncontiguous_partitioner.cc
   mu_parser_internal.cc
   multithread_info.cc
index 799cbe1f814a18820063001ed41717048e36474c..3d7fe8b5e327bd52a5a14c3b226461ca06ee3eca 100644 (file)
@@ -247,13 +247,13 @@ namespace Utilities
 
     /**
      * A re-implementation of compute_point_to_point_communication_pattern
-     * using the ConsensusAlgorithm.
+     * using a ConsensusAlgorithm.
      */
-    class ConsensusAlgorithmProcessTargets
-      : public ConsensusAlgorithmProcess<unsigned int, unsigned int>
+    class ConsensusAlgorithmsProcessTargets
+      : public ConsensusAlgorithms::Process<unsigned int, unsigned int>
     {
     public:
-      ConsensusAlgorithmProcessTargets(const std::vector<unsigned int> &target)
+      ConsensusAlgorithmsProcessTargets(const std::vector<unsigned int> &target)
         : target(target)
       {}
 
@@ -326,9 +326,9 @@ namespace Utilities
 
 #  if DEAL_II_MPI_VERSION_GTE(3, 0)
 
-      ConsensusAlgorithmProcessTargets process(destinations);
-      ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
-                             ConsensusAlgorithmProcessTargets::T2>
+      ConsensusAlgorithmsProcessTargets process(destinations);
+      ConsensusAlgorithms::NBX<ConsensusAlgorithmsProcessTargets::T1,
+                               ConsensusAlgorithmsProcessTargets::T2>
         consensus_algorithm(process, mpi_comm);
       consensus_algorithm.run();
       return process.get_result();
@@ -1029,14 +1029,14 @@ namespace Utilities
       // dictionary, the index set is statically repartitioned among the
       // processes again and extended with information with the actual owner
       // of that the index.
-      internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
+      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
         owned_indices, indices_to_look_up, comm, owning_ranks);
 
       // Step 2: read dictionary
       // Communicate with the process who owns the index in the static
       // partition (i.e. in the dictionary). This process returns the actual
       // owner of the index.
-      ConsensusAlgorithmSelector<
+      ConsensusAlgorithms::Selector<
         std::pair<types::global_dof_index, types::global_dof_index>,
         unsigned int>
         consensus_algorithm(process, comm);
diff --git a/source/base/mpi_consensus_algorithm.cc b/source/base/mpi_consensus_algorithm.cc
deleted file mode 100644 (file)
index b5ce9f5..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2005 - 2019 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-#include <deal.II/base/mpi_consensus_algorithm.templates.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-namespace Utilities
-{
-  namespace MPI
-  {
-    template class ConsensusAlgorithmProcess<unsigned int, unsigned int>;
-
-    template class ConsensusAlgorithm_NBX<unsigned int, unsigned int>;
-
-    template class ConsensusAlgorithm_PEX<unsigned int, unsigned int>;
-
-    template class ConsensusAlgorithmSelector<unsigned int, unsigned int>;
-
-
-    template class ConsensusAlgorithmProcess<
-      std::pair<types::global_dof_index, types::global_dof_index>,
-      unsigned int>;
-
-    template class ConsensusAlgorithmSelector<
-      std::pair<types::global_dof_index, types::global_dof_index>,
-      unsigned int>;
-
-    template class ConsensusAlgorithm_NBX<
-      std::pair<types::global_dof_index, types::global_dof_index>,
-      unsigned int>;
-
-    template class ConsensusAlgorithm_PEX<
-      std::pair<types::global_dof_index, types::global_dof_index>,
-      unsigned int>;
-
-  } // end of namespace MPI
-} // end of namespace Utilities
-
-DEAL_II_NAMESPACE_CLOSE
diff --git a/source/base/mpi_consensus_algorithms.cc b/source/base/mpi_consensus_algorithms.cc
new file mode 100644 (file)
index 0000000..38b23d7
--- /dev/null
@@ -0,0 +1,55 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2005 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/mpi_consensus_algorithms.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+  namespace MPI
+  {
+    namespace ConsensusAlgorithms
+    {
+      template class Process<unsigned int, unsigned int>;
+
+      template class NBX<unsigned int, unsigned int>;
+
+      template class PEX<unsigned int, unsigned int>;
+
+      template class Selector<unsigned int, unsigned int>;
+
+
+      template class Process<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>;
+
+      template class Selector<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>;
+
+      template class NBX<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>;
+
+      template class PEX<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>;
+
+    } // namespace ConsensusAlgorithms
+  }   // end of namespace MPI
+} // end of namespace Utilities
+
+DEAL_II_NAMESPACE_CLOSE
index 6f7a778dbc249b5f68b218fca330514b13371ea2..0a6b1a7b9fba3905251428cb702de1adb3bcd018 100644 (file)
@@ -233,7 +233,7 @@ namespace Utilities
         ghost_indices_data.n_elements());
 
       // set up dictionary
-      internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
+      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
         locally_owned_range_data,
         ghost_indices_data,
         communicator,
@@ -243,7 +243,7 @@ namespace Utilities
       // read dictionary by communicating with the process who owns the index
       // in the static partition (i.e. in the dictionary). This process
       // returns the actual owner of the index.
-      ConsensusAlgorithmSelector<
+      ConsensusAlgorithms::Selector<
         std::pair<types::global_dof_index, types::global_dof_index>,
         unsigned int>
         consensus_algorithm(process, communicator);
@@ -259,7 +259,7 @@ namespace Utilities
               {
                 Assert(i >= ghost_targets_data.back().first,
                        ExcInternalError(
-                         "Expect result of ConsensusAlgorithmProcess to be "
+                         "Expect result of ConsensusAlgorithmsProcess to be "
                          "sorted"));
                 if (i == ghost_targets_data.back().first)
                   ghost_targets_data.back().second++;
index 562bbe72ba29e492da6e69aa6839d9a233c01af0..5341df2d0b26fc5020d3da7fe75b37d6c8fd1af5 100644 (file)
@@ -14,9 +14,9 @@
 // ---------------------------------------------------------------------
 
 
-// Test AnonymousConsensusAlgorithmProcess.
+// Test ConsensusAlgorithms::AnonymousProcess.
 
-#include <deal.II/base/mpi_consensus_algorithm.h>
+#include <deal.II/base/mpi_consensus_algorithms.h>
 
 #include "../tests.h"
 
@@ -31,7 +31,7 @@ test(const MPI_Comm &comm)
   using T1 = unsigned int;
   using T2 = unsigned int;
 
-  dealii::Utilities::MPI::AnonymousConsensusAlgorithmProcess<T1, T2> process(
+  dealii::Utilities::MPI::ConsensusAlgorithms::AnonymousProcess<T1, T2> process(
     [&]() {
       std::vector<unsigned int> result{(my_rank + 1) % n_rank};
       return result;
@@ -55,7 +55,7 @@ test(const MPI_Comm &comm)
       deallog << "ConsensusAlgorithmProcess::function_read_answer() passed!"
               << std::endl;
     });
-  dealii::Utilities::MPI::ConsensusAlgorithmSelector<T1, T2>(process, comm)
+  dealii::Utilities::MPI::ConsensusAlgorithms::Selector<T1, T2>(process, comm)
     .run();
 }
 
index 031f39a2636f99e3955b44f1d1f9ada467538c9d..bb7fe7e96af427e1d8fcb0593eed0c76deeb07e3 100644 (file)
@@ -57,10 +57,10 @@ test()
     std::vector<unsigned int> owning_ranks_of_ghosts(
       local_relevant.n_elements());
 
-    Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmPayload
+    Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
       process(local_owned, local_relevant, comm, owning_ranks_of_ghosts, true);
 
-    Utilities::MPI::ConsensusAlgorithmSelector<
+    Utilities::MPI::ConsensusAlgorithms::Selector<
       std::pair<types::global_dof_index, types::global_dof_index>,
       unsigned int>
       consensus_algorithm(process, comm);
