https://gitweb.dealii.org/ - dealii.git/commitdiff
Avoid MPI_Alltoall in Utilities::MPI::Partitioner by using ComputeIndexOwner
author Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Mon, 16 Sep 2019 07:30:23 +0000 (09:30 +0200)
committer Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Thu, 19 Sep 2019 13:14:11 +0000 (15:14 +0200)
include/deal.II/base/mpi.h
include/deal.II/base/mpi_compute_index_owner_internal.h
source/base/mpi.cc
source/base/partitioner.cc

index d2193cad2c53b0ba9870a3bce1c3ec7c38d5ae24..e144aea01e07154ee86ddb168f46e82f9e458699 100644 (file)
@@ -869,12 +869,13 @@ namespace Utilities
      * This class implements ConsensusAlgorithm, using only point-to-point
      * communications and a single IBarrier.
      *
-     * @note This class closely follows the paper Hoefner et. al. "Scalable
-     *       Communication Protocols for Dynamic Sparse Data Exchange".
-     *       Since the algorithm shown there is not considering payloads, the
-     *       algorithm has been  modified here in such a way that synchronous
-     *       sends (Issend) have been replaced by equivalent Isend/Irecv, where
-     *       Irecv receives the answer to a request (with payload).
+     * @note This class closely follows the paper Hoefler, Siebert, Lumsdaine
+     *       "Scalable Communication Protocols for Dynamic Sparse Data
+     *       Exchange". Since the algorithm shown there does not consider
+     *       payloads, the algorithm has been modified here in such a way that
+     *       synchronous sends (Issend) have been replaced by equivalent
+     *       Isend/Irecv, where Irecv receives the answer to a request (with
+     *       payload).
      *
      * @tparam T1 the type of the elements of the vector to be sent
      * @tparam T2 the type of the elements of the vector to be received
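
As a point of reference for the @note above, the following is a minimal, self-contained sketch of the dynamic sparse data exchange ("NBX") protocol of Hoefler, Siebert and Lumsdaine for the payload-free case, i.e., each rank only learns which ranks send to it. It is not the deal.II implementation (which, as noted, pairs Isend/Irecv so that an answer with payload can travel back); the tag and the single-int payload are arbitrary choices for this sketch.

#include <mpi.h>

#include <vector>

// Minimal sketch of the NBX protocol, assuming a payload of a single int per
// target; not the deal.II implementation.
std::vector<int>
sparse_exchange(const std::vector<int> &targets, const MPI_Comm comm)
{
  const int tag = 42; // arbitrary tag for this sketch

  // synchronous nonblocking sends: completion implies the message has been
  // matched by a receive on the target
  std::vector<int>         payloads(targets.size(), 1);
  std::vector<MPI_Request> send_requests(targets.size());
  for (unsigned int i = 0; i < targets.size(); ++i)
    MPI_Issend(&payloads[i], 1, MPI_INT, targets[i], tag, comm,
               &send_requests[i]);

  std::vector<int> sources;
  MPI_Request      barrier_request;
  bool             barrier_active = false;
  while (true)
    {
      // receive whatever has arrived so far
      int        message_pending;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, tag, comm, &message_pending, &status);
      if (message_pending)
        {
          int received_payload;
          MPI_Recv(&received_payload, 1, MPI_INT, status.MPI_SOURCE, tag,
                   comm, MPI_STATUS_IGNORE);
          sources.push_back(status.MPI_SOURCE);
        }

      if (!barrier_active)
        {
          // once all our sends have been matched, enter the nonblocking
          // barrier but keep probing for messages from other ranks
          int all_sends_matched;
          MPI_Testall(static_cast<int>(send_requests.size()),
                      send_requests.data(), &all_sends_matched,
                      MPI_STATUSES_IGNORE);
          if (all_sends_matched)
            {
              MPI_Ibarrier(comm, &barrier_request);
              barrier_active = true;
            }
        }
      else
        {
          // when the barrier completes, every rank has had its sends matched,
          // so no further message can be pending for us
          int everyone_done;
          MPI_Test(&barrier_request, &everyone_done, MPI_STATUS_IGNORE);
          if (everyone_done)
            break;
        }
    }
  return sources;
}
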
index 1244873a78c7799c58ba795c0d3be4b5ce426856..22a564afd93739eef5a9247a10b83790c00e6512 100644 (file)
@@ -29,22 +29,77 @@ namespace Utilities
     namespace internal
     {
       /**
-       * An internal namespace used for Utilities::MPI::compute_index_owner().
+       * An internal namespace used for Utilities::MPI::compute_index_owner()
+       * and for Utilities::MPI::Partitioner::set_ghost_indices().
        */
       namespace ComputeIndexOwner
       {
+        /**
+         * Dictionary class with basic partitioning in terms of a single
+         * interval of fixed size known to all MPI ranks for two-stage index
+         * lookup.
+         */
         struct Dictionary
         {
+          /**
+           * A tag attached to the MPI communication during the dictionary
+           * lookup.
+           */
           static const unsigned int tag_setup = 11;
 
+          /**
+           * A vector with as many entries as there are dofs in the dictionary
+           * of the current process, and each entry containing the rank of the
+           * owner of that dof in the IndexSet `owned_indices`. This is
+           * queried in the index lookup, so we keep an expanded list.
+           */
           std::vector<unsigned int> actually_owning_ranks;
 
+          /**
+           * A sorted vector containing the MPI ranks appearing in
+           * `actually_owning_ranks`.
+           */
+          std::vector<unsigned int> actually_owning_rank_list;
+
+          /**
+           * The number of unknowns in the dictionary on each MPI rank, used
+           * for the index space splitting. For simplicity of index
+           * lookup without additional communication, this number is the same
+           * on all MPI ranks.
+           */
           types::global_dof_index dofs_per_process;
+
+          /**
+           * The local range of the global index space that is represented in
+           * the dictionary, computed from `dofs_per_process` and the current
+           * MPI rank.
+           */
           std::pair<types::global_dof_index, types::global_dof_index>
-                                  local_range;
+            local_range;
+
+          /**
+           * The actual size of the local dictionary range, i.e., the minimum
+           * of dofs_per_process and the distance to the end of the index
+           * space. Equivalent to `local_range.second - local_range.first`.
+           */
           types::global_dof_index local_size;
+
+          /**
+           * The global size of the index space.
+           */
           types::global_dof_index size;
 
+          /**
+           * The number of ranks the `owned_indices` IndexSet is distributed
+           * among.
+           */
+          unsigned int n_dict_procs_in_owned_indices;
+
+          /**
+           * Set up the dictionary by computing the partitioning from the
+           * global size and sending the rank information on locally owned
+           * ranges to the owner of the dictionary part.
+           */
           void
           reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
           {
@@ -53,7 +108,7 @@ namespace Utilities
 #ifdef DEAL_II_WITH_MPI
             unsigned int my_rank = this_mpi_process(comm);
 
-            types::global_dof_index              dic_local_rececived = 0;
+            types::global_dof_index              dic_local_received = 0;
             std::map<unsigned int, unsigned int> relevant_procs_map;
 
             // 2) collect relevant processes and process local dict entries
@@ -64,9 +119,10 @@ namespace Utilities
                   unsigned int other_rank = this->dof_to_dict_rank(i);
                   if (other_rank == my_rank)
                     {
-                      this->actually_owning_ranks[i - this->local_range.first] =
-                        my_rank;
-                      dic_local_rececived++;
+                      actually_owning_ranks[i - local_range.first] = my_rank;
+                      dic_local_received++;
+                      if (actually_owning_rank_list.empty())
+                        actually_owning_rank_list.push_back(my_rank);
                     }
                   else if (relevant_procs.empty() ||
                            relevant_procs.back() != other_rank)
@@ -80,16 +136,16 @@ namespace Utilities
               }
             }
 
-            const unsigned int n_relevant_procs = relevant_procs_map.size();
+            n_dict_procs_in_owned_indices = relevant_procs_map.size();
             std::vector<std::vector<
               std::pair<types::global_dof_index, types::global_dof_index>>>
-                                     buffers(n_relevant_procs);
-            std::vector<MPI_Request> request(n_relevant_procs);
+                                     buffers(n_dict_procs_in_owned_indices);
+            std::vector<MPI_Request> request(n_dict_procs_in_owned_indices);
 
             // 3) send messages with local dofs to the right dict process
             {
               std::vector<std::vector<types::global_dof_index>> temp(
-                n_relevant_procs);
+                n_dict_procs_in_owned_indices);
 
               // collect dofs of each dict process
               for (auto i : owned_indices)
@@ -131,9 +187,8 @@ namespace Utilities
                 }
             }
 
-
             // 4) receive messages until all dofs in dict are processed
-            while (this->local_size != dic_local_rececived)
+            while (this->local_size != dic_local_received)
               {
                 // wait for an incoming message
                 MPI_Status status;
@@ -148,6 +203,7 @@ namespace Utilities
                 AssertThrowMPI(ierr);
 
                 const auto other_rank = status.MPI_SOURCE;
+                actually_owning_rank_list.push_back(other_rank);
 
                 // receive message
                 Assert(number_amount % 2 == 0, ExcInternalError());
@@ -171,14 +227,21 @@ namespace Utilities
                     {
                       this->actually_owning_ranks[i - this->local_range.first] =
                         other_rank;
-                      dic_local_rececived++;
+                      dic_local_received++;
                     }
               }
 
+            std::sort(actually_owning_rank_list.begin(),
+                      actually_owning_rank_list.end());
+
+            for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
+              Assert(actually_owning_rank_list[i] >
+                       actually_owning_rank_list[i - 1],
+                     ExcInternalError());
+
             // 5) make sure that all messages have been sent
-            const auto ierr = MPI_Waitall(n_relevant_procs,
-                                          request.data(),
-                                          MPI_STATUSES_IGNORE);
+            const auto ierr =
+              MPI_Waitall(request.size(), request.data(), MPI_STATUSES_IGNORE);
             AssertThrowMPI(ierr);
 #else
             (void)owned_indices;
@@ -186,13 +249,45 @@ namespace Utilities
 #endif
           }
 
+          /**
+           * Translate a global dof index to the MPI rank in the dictionary
+           * using `dofs_per_process`.
+           */
           unsigned int
           dof_to_dict_rank(const types::global_dof_index i)
           {
             return i / dofs_per_process;
           }
 
+          /**
+           * Given the rank in the owned indices from `actually_owning_ranks`,
+           * this returns the index of the rank in the
+           * `actually_owning_rank_list`.
+           */
+          unsigned int
+          get_owning_rank_index(const unsigned int rank_in_owned_indices,
+                                const unsigned int guess = 0)
+          {
+            AssertIndexRange(guess, actually_owning_rank_list.size());
+            if (actually_owning_rank_list[guess] == rank_in_owned_indices)
+              return guess;
+            else
+              {
+                auto it = std::lower_bound(actually_owning_rank_list.begin(),
+                                           actually_owning_rank_list.end(),
+                                           rank_in_owned_indices);
+                Assert(it != actually_owning_rank_list.end(),
+                       ExcInternalError());
+                Assert(*it == rank_in_owned_indices, ExcInternalError());
+                return it - actually_owning_rank_list.begin();
+              }
+          }
+
         private:
+          /**
+           * Compute the partition from the global size of the index space and
+           * the number of ranks.
+           */
           void
           partition(const IndexSet &owned_indices, const MPI_Comm &comm)
           {
@@ -215,51 +310,144 @@ namespace Utilities
           }
         };
 
-        class ConsensusAlgorithmProcess
-          : public dealii::Utilities::MPI::
-              ConsensusAlgorithmProcess<types::global_dof_index, unsigned int>
+
+
+        /**
+         * Specialization of ConsensusAlgorithmProcess for the context of
+         * Utilities::MPI::compute_index_owner() and
+         * Utilities::MPI::Partitioner::set_ghost_indices() with additional
+         * payload.
+         */
+        class ConsensusAlgorithmPayload
+          : public ConsensusAlgorithmProcess<
+              std::pair<types::global_dof_index, types::global_dof_index>,
+              unsigned int>
         {
         public:
-          ConsensusAlgorithmProcess(const IndexSet &owned_indices,
+          /**
+           * Constructor.
+           */
+          ConsensusAlgorithmPayload(const IndexSet &owned_indices,
                                     const IndexSet &indices_to_look_up,
                                     const MPI_Comm &comm,
-                                    std::vector<unsigned int> &owning_ranks)
+                                    std::vector<unsigned int> &owning_ranks,
+                                    const bool track_index_requests = false)
             : owned_indices(owned_indices)
             , indices_to_look_up(indices_to_look_up)
             , comm(comm)
             , my_rank(this_mpi_process(comm))
             , n_procs(n_mpi_processes(comm))
+            , track_index_requests(track_index_requests)
             , owning_ranks(owning_ranks)
           {
-            this->dict.reinit(owned_indices, comm);
+            dict.reinit(owned_indices, comm);
+            requesters.resize(dict.actually_owning_rank_list.size());
           }
 
-          const IndexSet &           owned_indices;
-          const IndexSet &           indices_to_look_up;
-          const MPI_Comm &           comm;
-          const unsigned int         my_rank;
-          const unsigned int         n_procs;
+          /**
+           * The index space which describes the locally owned space.
+           */
+          const IndexSet &owned_indices;
+
+          /**
+           * The indices which are "ghosts" on a given rank and should be
+           * looked up in terms of their owner rank from owned_indices.
+           */
+          const IndexSet &indices_to_look_up;
+
+          /**
+           * The underlying MPI communicator.
+           */
+          const MPI_Comm comm;
+
+          /**
+           * The present MPI rank.
+           */
+          const unsigned int my_rank;
+
+          /**
+           * The total number of ranks participating in the MPI communicator
+           * `comm`.
+           */
+          const unsigned int n_procs;
+
+          /**
+           * Controls whether the origin of the requests for the ghost owners
+           * should also be stored. If so, it will be added to `requesters`
+           * and can be queried by `get_requesters()`.
+           */
+          const bool track_index_requests;
+
+          /**
+           * The result of the index owner computation: To each index
+           * contained in `indices_to_look_up`, this vector contains the MPI
+           * rank of the owner in `owned_indices`.
+           */
           std::vector<unsigned int> &owning_ranks;
 
+          /**
+           * Keeps track of the origin of the requests. The layout of the data
+           * structure is as follows: The outermost vector has as many entries
+           * as Dictionary::actually_owning_rank_list and represents the
+           * information we should send back to the owners from the present
+           * dictionary entry. The second vector then collects a list of MPI
+           * ranks that have requested data, using the rank in the first pair
+           * entry and a list of index ranges as the second entry.
+           */
+          std::vector<std::vector<
+            std::pair<unsigned int,
+                      std::vector<std::pair<unsigned int, unsigned int>>>>>
+            requesters;
+
+          /**
+           * The dictionary handling the requests.
+           */
           Dictionary dict;
 
-          std::map<unsigned int, std::vector<types::global_dof_index>> temp;
+          /**
+           * Array to collect the indices to look up, sorted by the rank in
+           * the dictionary.
+           */
+          std::map<unsigned int, std::vector<types::global_dof_index>>
+            indices_to_look_up_by_dict_rank;
+
+          /**
+           * The field where, for each dictionary rank we query, the positions
+           * in `owning_ranks` to be filled by the incoming answers are stored.
+           */
           std::map<unsigned int, std::vector<unsigned int>> recv_indices;
 
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::process_request(),
+           * adding the owner of a particular index to request_buffer (and
+           * keeping track of who requested a particular index in case that
+           * information is also desired).
+           */
           virtual void
           process_request(
-            const unsigned int                          other_rank,
-            const std::vector<types::global_dof_index> &buffer_recv,
-            std::vector<unsigned int> &                 request_buffer) override
+            const unsigned int                                     other_rank,
+            const std::vector<std::pair<types::global_dof_index,
+                                        types::global_dof_index>> &buffer_recv,
+            std::vector<unsigned int> &request_buffer) override
           {
-            (void)other_rank;
-            Assert(buffer_recv.size() % 2 == 0, ExcInternalError());
-            for (unsigned int j = 0; j < buffer_recv.size(); j += 2)
-              for (auto i = buffer_recv[j]; i < buffer_recv[j + 1]; i++)
-                request_buffer.push_back(
-                  dict.actually_owning_ranks[i - dict.local_range.first]);
+            unsigned int owner_index = 0;
+            for (const auto interval : buffer_recv)
+              for (auto i = interval.first; i < interval.second; ++i)
+                {
+                  const unsigned int actual_owner =
+                    dict.actually_owning_ranks[i - dict.local_range.first];
+                  request_buffer.push_back(actual_owner);
+
+                  if (track_index_requests)
+                    append_index_origin(i, owner_index, other_rank);
+                }
           }
 
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::compute_targets().
+           */
           virtual std::vector<unsigned int>
           compute_targets() override
           {
@@ -267,13 +455,18 @@ namespace Utilities
 
             // 1) collect relevant processes and process local dict entries
             {
-              unsigned int index = 0;
+              unsigned int index       = 0;
+              unsigned int owner_index = 0;
               for (auto i : indices_to_look_up)
                 {
                   unsigned int other_rank = dict.dof_to_dict_rank(i);
                   if (other_rank == my_rank)
-                    owning_ranks[index] =
-                      dict.actually_owning_ranks[i - dict.local_range.first];
+                    {
+                      owning_ranks[index] =
+                        dict.actually_owning_ranks[i - dict.local_range.first];
+                      if (track_index_requests)
+                        append_index_origin(i, owner_index, my_rank);
+                    }
                   else if (targets.empty() || targets.back() != other_rank)
                     targets.push_back(other_rank);
                   index++;
@@ -283,8 +476,8 @@ namespace Utilities
 
             for (auto i : targets)
               {
-                recv_indices[i] = {};
-                temp[i]         = {};
+                recv_indices[i]                    = {};
+                indices_to_look_up_by_dict_rank[i] = {};
               }
 
             // 3) collect indices for each process
@@ -296,26 +489,31 @@ namespace Utilities
                   if (other_rank != my_rank)
                     {
                       recv_indices[other_rank].push_back(index);
-                      temp[other_rank].push_back(i);
+                      indices_to_look_up_by_dict_rank[other_rank].push_back(i);
                     }
                   index++;
                 }
             }
 
             Assert(targets.size() == recv_indices.size() &&
-                     targets.size() == temp.size(),
+                     targets.size() == indices_to_look_up_by_dict_rank.size(),
                    ExcMessage("Size does not match!"));
 
             return targets;
           }
 
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::pack_recv_buffer().
+           */
           virtual void
-          pack_recv_buffer(
-            const int                             other_rank,
-            std::vector<types::global_dof_index> &send_buffer) override
+          pack_recv_buffer(const int other_rank,
+                           std::vector<std::pair<types::global_dof_index,
+                                                 types::global_dof_index>>
+                             &send_buffer) override
           {
             // create index set and compress data to be sent
-            auto &   indices_i = temp[other_rank];
+            auto &   indices_i = indices_to_look_up_by_dict_rank[other_rank];
             IndexSet is(dict.size);
             is.add_indices(indices_i.begin(), indices_i.end());
             is.compress();
@@ -323,12 +521,14 @@ namespace Utilities
             for (auto interval = is.begin_intervals();
                  interval != is.end_intervals();
                  interval++)
-              {
-                send_buffer.push_back(*interval->begin());
-                send_buffer.push_back(interval->last() + 1);
-              }
+              send_buffer.emplace_back(*interval->begin(),
+                                       interval->last() + 1);
           }
 
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::prepare_recv_buffer().
+           */
           virtual void
           prepare_recv_buffer(const int                  other_rank,
                               std::vector<unsigned int> &recv_buffer) override
@@ -336,6 +536,10 @@ namespace Utilities
             recv_buffer.resize(recv_indices[other_rank].size());
           }
 
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::unpack_recv_buffer().
+           */
           virtual void
           unpack_recv_buffer(
             const int                        other_rank,
@@ -347,6 +551,209 @@ namespace Utilities
             for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
               owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
           }
+
+          /**
+           * Resolve the origin of the requests by sending the information
+           * accumulated by the dictionary owners during the run of the
+           * consensus algorithm back to the owners in the original
+           * IndexSet. This requires some point-to-point communication.
+           *
+           * @return Map of processors and associated ranges of indices that
+           *         are requested from the current rank
+           */
+          std::map<unsigned int, IndexSet>
+          get_requesters()
+          {
+            Assert(track_index_requests,
+                   ExcMessage("Must enable index range tracking in"
+                              "constructor of ConsensusAlgorithmProcess"));
+
+            std::map<unsigned int, dealii::IndexSet> requested_indices;
+
+#ifdef DEAL_II_WITH_MPI
+
+            // reserve enough slots for the requests ahead; depending on
+            // whether the owning rank is one of the requesters or not, we
+            // might have one request fewer to execute, so fill the requests
+            // on demand.
+            std::vector<MPI_Request> send_requests;
+            send_requests.reserve(requesters.size());
+
+            // We use an integer vector for the data exchange. Since we send
+            // data associated with intervals for different requesters, we will
+            // need to send (a) the MPI rank of the requester, (b) the number
+            // of intervals directed to this requester, and (c) a list of
+            // intervals, i.e., two integers per interval. The number of items
+            // sent in total can be deduced both via the MPI status message at
+            // the receiver side as well as by counting the buckets from
+            // different requesters.
+            std::vector<std::vector<unsigned int>> send_data(requesters.size());
+            for (unsigned int i = 0; i < requesters.size(); ++i)
+              {
+                // special code for our own indices
+                if (dict.actually_owning_rank_list[i] == my_rank)
+                  {
+                    for (const auto &j : requesters[i])
+                      {
+                        const types::global_dof_index index_offset =
+                          static_cast<types::global_dof_index>(my_rank) *
+                          dict.dofs_per_process;
+                        IndexSet &my_index_set = requested_indices[j.first];
+                        my_index_set.set_size(owned_indices.size());
+                        for (const auto &interval : j.second)
+                          my_index_set.add_range(index_offset + interval.first,
+                                                 index_offset +
+                                                   interval.second);
+                      }
+                  }
+                else
+                  {
+                    for (const auto &j : requesters[i])
+                      {
+                        send_data[i].push_back(j.first);
+                        send_data[i].push_back(j.second.size());
+                        for (const auto &interval : j.second)
+                          {
+                            send_data[i].push_back(interval.first);
+                            send_data[i].push_back(interval.second);
+                          }
+                      }
+                    send_requests.push_back(MPI_Request());
+                    const int ierr =
+                      MPI_Isend(send_data[i].data(),
+                                send_data[i].size(),
+                                MPI_UNSIGNED,
+                                dict.actually_owning_rank_list[i],
+                                1021,
+                                comm,
+                                &send_requests.back());
+                    AssertThrowMPI(ierr);
+                  }
+              }
+
+            // receive the data
+            for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices;
+                 ++c)
+              {
+                // wait for an incoming message
+                MPI_Status   status;
+                unsigned int ierr =
+                  MPI_Probe(MPI_ANY_SOURCE, 1021, comm, &status);
+                AssertThrowMPI(ierr);
+
+                // retrieve size of incoming message
+                int number_amount;
+                ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
+                AssertThrowMPI(ierr);
+
+                // receive message
+                Assert(number_amount % 2 == 0, ExcInternalError());
+                std::vector<std::pair<unsigned int, unsigned int>> buffer(
+                  number_amount / 2);
+                ierr = MPI_Recv(buffer.data(),
+                                number_amount,
+                                MPI_UNSIGNED,
+                                status.MPI_SOURCE,
+                                1021,
+                                comm,
+                                &status);
+                AssertThrowMPI(ierr);
+
+                // unpack the message and translate the dictionary-local
+                // indices coming via MPI to the global index range
+                const types::global_dof_index index_offset =
+                  static_cast<types::global_dof_index>(status.MPI_SOURCE) *
+                  dict.dofs_per_process;
+                unsigned int offset = 0;
+                while (offset < buffer.size())
+                  {
+                    AssertIndexRange(offset + buffer[offset].second,
+                                     buffer.size());
+
+                    IndexSet my_index_set(owned_indices.size());
+                    for (unsigned int i = offset + 1;
+                         i < offset + buffer[offset].second + 1;
+                         ++i)
+                      my_index_set.add_range(index_offset + buffer[i].first,
+                                             index_offset + buffer[i].second);
+
+                    // the underlying index set is able to merge ranges coming
+                    // from different ranks due to the partitioning in the
+                    // dictionary
+                    IndexSet &index_set =
+                      requested_indices[buffer[offset].first];
+                    if (index_set.size() == 0)
+                      index_set.set_size(owned_indices.size());
+                    index_set.add_indices(my_index_set);
+
+                    offset += buffer[offset].second + 1;
+                  }
+                AssertDimension(offset, buffer.size());
+              }
+
+            if (send_requests.size() > 0)
+              MPI_Waitall(send_requests.size(),
+                          send_requests.data(),
+                          MPI_STATUSES_IGNORE);
+
+#  ifdef DEBUG
+            for (const auto &it : requested_indices)
+              {
+                IndexSet copy_set = it.second;
+                copy_set.subtract_set(owned_indices);
+                Assert(copy_set.n_elements() == 0,
+                       ExcInternalError(
+                         "The indices requested from the current "
+                         "MPI rank should be locally owned here!"));
+              }
+#  endif
+
+#endif // DEAL_II_WITH_MPI
+
+            return requested_indices;
+          }
+
+        private:
+          /**
+           * Stores the index request in the `requesters` field. We first find
+           * out the owner of the index that was requested (using the guess in
+           * `owner_index`, as we typically might look up the same rank
+           * several times in a row, which avoids the binary search in
+           * Dictionary::get_owning_rank_index()). Once we know the rank of
+           * the owner, we fill the vector entry with the rank of the request.
+           * Here, we utilize the fact that requests are processed
+           * rank-by-rank, so we can simply look at the end of the vector to
+           * see whether there is already some data stored or not. Finally, we
+           * build ranges, again using the fact that the index list is sorted
+           * and we therefore only need to append at the end.
+           */
+          void
+          append_index_origin(const types::global_dof_index index,
+                              unsigned int &                owner_index,
+                              const unsigned int            rank_of_request)
+          {
+            // remember who requested which index. We want to use an
+            // std::vector with simple addressing, via a good guess from the
+            // preceding index, rather than std::map, because this is an inner
+            // loop and it avoids the map lookup in every iteration
+            const unsigned int rank_of_owner =
+              dict.actually_owning_ranks[index - dict.local_range.first];
+            owner_index =
+              dict.get_owning_rank_index(rank_of_owner, owner_index);
+            if (requesters[owner_index].empty() ||
+                requesters[owner_index].back().first != rank_of_request)
+              requesters[owner_index].emplace_back(
+                rank_of_request,
+                std::vector<std::pair<unsigned int, unsigned int>>());
+            if (requesters[owner_index].back().second.empty() ||
+                requesters[owner_index].back().second.back().second !=
+                  index - dict.local_range.first)
+              requesters[owner_index].back().second.emplace_back(
+                index - dict.local_range.first,
+                index - dict.local_range.first + 1);
+            else
+              ++requesters[owner_index].back().second.back().second;
+          }
         };
 
       } // namespace ComputeIndexOwner
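
To make the message layout described in get_requesters() above concrete, here is a stand-alone toy example (not deal.II code; all ranks and intervals are invented) that flattens one entry of `requesters` (the data destined for a single owner from `actually_owning_rank_list`) into the flat unsigned-int buffer sent with tag 1021: the requesting rank, the number of intervals, and then the dictionary-local half-open intervals.

#include <cstdio>
#include <utility>
#include <vector>

int main()
{
  using Interval  = std::pair<unsigned int, unsigned int>;
  using Requester = std::pair<unsigned int, std::vector<Interval>>;

  // data destined for one owner, i.e. one entry of `requesters`:
  // rank 3 requested dictionary-local indices [0,2) and [5,6),
  // rank 7 requested [2,4)
  const std::vector<Requester> requesters_of_one_owner = {
    {3, {{0, 2}, {5, 6}}}, {7, {{2, 4}}}};

  // flatten into the buffer sent to that owner
  std::vector<unsigned int> send_data;
  for (const auto &requester : requesters_of_one_owner)
    {
      send_data.push_back(requester.first); // (a) requesting rank
      send_data.push_back(                  // (b) number of intervals
        static_cast<unsigned int>(requester.second.size()));
      for (const auto &interval : requester.second) // (c) the intervals
        {
          send_data.push_back(interval.first);
          send_data.push_back(interval.second);
        }
    }

  // resulting buffer: 3 2 0 2 5 6 7 1 2 4
  for (const unsigned int v : send_data)
    std::printf("%u ", v);
  std::printf("\n");
}

On the receiving side, get_requesters() decodes such a buffer back into IndexSet ranges, shifting each interval by the dictionary offset of the sending rank.
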
index 364cd5fb2244b4229e765e1c910fc0a49d19ef15..e75884d9affb7879788c43d1b4c557cdba4acf3f 100644 (file)
@@ -995,16 +995,15 @@ namespace Utilities
           std::vector<T1> buffer_recv;
           // get size of incoming message
           int  number_amount;
-          auto ierr = MPI_Get_count(&status,
-                                    internal::mpi_type_id(buffer_recv.data()),
-                                    &number_amount);
+          auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
           AssertThrowMPI(ierr);
 
           // allocate memory for incoming message
-          buffer_recv.resize(number_amount);
+          Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
+          buffer_recv.resize(number_amount / sizeof(T1));
           ierr = MPI_Recv(buffer_recv.data(),
                           number_amount,
-                          internal::mpi_type_id(buffer_recv.data()),
+                          MPI_BYTE,
                           other_rank,
                           tag_request,
                           this->comm,
@@ -1023,8 +1022,8 @@ namespace Utilities
 
           // start to send answer back
           ierr = MPI_Isend(request_buffer.data(),
-                           request_buffer.size(),
-                           internal::mpi_type_id(request_buffer.data()),
+                           request_buffer.size() * sizeof(T2),
+                           MPI_BYTE,
                            other_rank,
                            tag_delivery,
                            this->comm,
@@ -1064,8 +1063,8 @@ namespace Utilities
 
             // start to send data
             auto ierr = MPI_Isend(send_buffer.data(),
-                                  send_buffer.size(),
-                                  internal::mpi_type_id(send_buffer.data()),
+                                  send_buffer.size() * sizeof(T1),
+                                  MPI_BYTE,
                                   rank,
                                   tag_request,
                                   this->comm,
@@ -1076,8 +1075,8 @@ namespace Utilities
             auto &recv_buffer = recv_buffers[index];
             this->process.prepare_recv_buffer(rank, recv_buffer);
             ierr = MPI_Irecv(recv_buffer.data(),
-                             recv_buffer.size(),
-                             internal::mpi_type_id(recv_buffer.data()),
+                             recv_buffer.size() * sizeof(T2),
+                             MPI_BYTE,
                              rank,
                              tag_delivery,
                              this->comm,
@@ -1149,12 +1148,10 @@ namespace Utilities
       using T2 = int;
 
       virtual void
-      process_request(const unsigned int     other_rank,
-                      const std::vector<T1> &buffer_recv,
-                      std::vector<T2> &      request_buffer) override
+      process_request(const unsigned int other_rank,
+                      const std::vector<T1> &,
+                      std::vector<T2> &) override
       {
-        (void)buffer_recv;
-        (void)request_buffer;
         this->sources.push_back(other_rank);
       }
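
The hunks in this file replace transfers typed via internal::mpi_type_id() or MPI_UNSIGNED by raw MPI_BYTE transfers whose counts are scaled by sizeof(T), so that compound element types such as the std::pair of indices used by ComputeIndexOwner can be exchanged without defining a custom MPI datatype. A minimal stand-alone sketch of the receive side of this pattern, for an assumed trivially copyable element type T:

#include <mpi.h>

#include <cassert>
#include <vector>

// Receive a std::vector<T> of unknown length that was sent as raw bytes;
// T must be trivially copyable. A sketch, not deal.II's implementation.
template <typename T>
std::vector<T>
probe_and_receive_as_bytes(const int tag, const MPI_Comm comm)
{
  MPI_Status status;
  MPI_Probe(MPI_ANY_SOURCE, tag, comm, &status);

  int number_of_bytes = 0;
  MPI_Get_count(&status, MPI_BYTE, &number_of_bytes);
  assert(number_of_bytes % sizeof(T) == 0);

  std::vector<T> buffer(number_of_bytes / sizeof(T));
  MPI_Recv(buffer.data(),
           number_of_bytes,
           MPI_BYTE,
           status.MPI_SOURCE,
           tag,
           comm,
           MPI_STATUS_IGNORE);
  return buffer;
}

// the matching send side simply scales the element count:
//   MPI_Isend(v.data(), v.size() * sizeof(T), MPI_BYTE, rank, tag, comm, &req);
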
 
@@ -1237,16 +1234,15 @@ namespace Utilities
 
       // get size of incoming message
       int  number_amount;
-      auto ierr = MPI_Get_count(&status,
-                                internal::mpi_type_id(buffer_recv.data()),
-                                &number_amount);
+      auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
       AssertThrowMPI(ierr);
 
       // allocate memory for incoming message
-      buffer_recv.resize(number_amount);
+      Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
+      buffer_recv.resize(number_amount / sizeof(T1));
       ierr = MPI_Recv(buffer_recv.data(),
                       number_amount,
-                      internal::mpi_type_id(buffer_recv.data()),
+                      MPI_BYTE,
                       other_rank,
                       tag_request,
                       this->comm,
@@ -1259,8 +1255,8 @@ namespace Utilities
 
       // start to send answer back
       ierr = MPI_Isend(request_buffer.data(),
-                       request_buffer.size(),
-                       MPI_UNSIGNED,
+                       request_buffer.size() * sizeof(T2),
+                       MPI_BYTE,
                        other_rank,
                        tag_delivery,
                        this->comm,
@@ -1320,8 +1316,8 @@ namespace Utilities
 
           // start to send data
           auto ierr = MPI_Isend(send_buffer.data(),
-                                send_buffer.size(),
-                                internal::mpi_type_id(send_buffer.data()),
+                                send_buffer.size() * sizeof(T1),
+                                MPI_BYTE,
                                 rank,
                                 tag_request,
                                 this->comm,
@@ -1332,8 +1328,8 @@ namespace Utilities
           auto &recv_buffer = recv_buffers[i];
           this->process.prepare_recv_buffer(rank, recv_buffer);
           ierr = MPI_Irecv(recv_buffer.data(),
-                           recv_buffer.size(),
-                           MPI_UNSIGNED,
+                           recv_buffer.size() * sizeof(T2),
+                           MPI_BYTE,
                            rank,
                            tag_delivery,
                            this->comm,
@@ -1424,14 +1420,16 @@ namespace Utilities
       // dictionary, the index set is statically repartitioned among the
      // processes again and extended with information about the actual owner
      // of that index.
-      internal::ComputeIndexOwner::ConsensusAlgorithmProcess process(
+      internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
         owned_indices, indices_to_look_up, comm, owning_ranks);
 
       // Step 2: read dictionary
       // Communicate with the process who owns the index in the static
-      // partition (i.e. in the partition). This process returns the actual
+      // partition (i.e. in the dictionary). This process returns the actual
       // owner of the index.
-      ConsensusAlgorithmSelector<types::global_dof_index, unsigned int>
+      ConsensusAlgorithmSelector<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>
         consensus_algorithm(process, comm);
       consensus_algorithm.run();
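
The two steps above rely on the Dictionary from mpi_compute_index_owner_internal.h: the global index space is split into intervals of a fixed size known to every rank, so the dictionary rank responsible for any index can be computed without communication, and that rank in turn knows the actual owner. A small sketch of this layout, assuming (as the documentation of `dofs_per_process` suggests) that the interval size is the global size divided by the number of ranks, rounded up; the real code may differ in details:

#include <algorithm>
#include <cstdint>
#include <utility>

// Sketch of the fixed-interval dictionary layout; N is the global size of the
// index space and n_ranks the number of MPI ranks. Not the deal.II code.
struct DictionarySketch
{
  std::uint64_t dofs_per_process;
  std::uint64_t size;

  DictionarySketch(const std::uint64_t N, const unsigned int n_ranks)
    : dofs_per_process((N + n_ranks - 1) / n_ranks) // assumed: round up
    , size(N)
  {}

  // the dictionary rank that stores the owner of global index i
  unsigned int
  dof_to_dict_rank(const std::uint64_t i) const
  {
    return static_cast<unsigned int>(i / dofs_per_process);
  }

  // half-open interval [first, second) of the index space handled by
  // dictionary rank r; the last rank(s) may hold fewer (or zero) entries
  std::pair<std::uint64_t, std::uint64_t>
  local_range(const unsigned int r) const
  {
    const std::uint64_t first =
      std::min<std::uint64_t>(std::uint64_t(r) * dofs_per_process, size);
    const std::uint64_t last =
      std::min<std::uint64_t>(first + dofs_per_process, size);
    return {first, last};
  }
};

Each ghost index is first routed to its dictionary rank, which answers with the corresponding entry of `actually_owning_ranks`; this two-stage lookup is what removes the need for the all-to-all communication mentioned in the commit title.
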
 
index 133e61e49a3b6e88c6a6e0497956e1f42213aba3..4a63c4bc81e2509c6c3388b6290e2d2e4b55d537 100644 (file)
@@ -13,6 +13,7 @@
 //
 // ---------------------------------------------------------------------
 
+#include <deal.II/base/mpi_compute_index_owner_internal.h>
 #include <deal.II/base/partitioner.h>
 #include <deal.II/base/partitioner.templates.h>
 
@@ -192,215 +193,154 @@ namespace Utilities
           return;
         }
 
-      std::vector<types::global_dof_index> first_index(n_procs + 1);
+      types::global_dof_index my_size = local_size();
       // Allow non-zero start index for the vector. send this data to all
       // processors
-      first_index[0] = local_range_data.first;
-      int ierr       = MPI_Bcast(
-        first_index.data(), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0, communicator);
-      AssertThrowMPI(ierr);
+      if (my_pid == 0)
+        my_size += local_range_data.first;
 
-      // Get the end-of-local_range for all processors
-      ierr = MPI_Allgather(&local_range_data.second,
-                           1,
-                           DEAL_II_DOF_INDEX_MPI_TYPE,
-                           &first_index[1],
-                           1,
-                           DEAL_II_DOF_INDEX_MPI_TYPE,
-                           communicator);
-      AssertThrowMPI(ierr);
-      first_index[n_procs] = global_size;
-
-      // fix case when there are some processors without any locally owned
-      // indices: then there might be a zero in some entries. The reason
-      // is that local_range_data will contain [0,0) and second index is
-      // incorrect inside the Allgather'ed first_index. Below we fix this
-      // by ensuring that the start point is always the end index of the
-      // processor immediately before.
-      if (global_size > 0)
+      types::global_dof_index my_shift = 0;
+      {
+        const int ierr = MPI_Exscan(&my_size,
+                                    &my_shift,
+                                    1,
+                                    DEAL_II_DOF_INDEX_MPI_TYPE,
+                                    MPI_SUM,
+                                    communicator);
+        AssertThrowMPI(ierr);
+      }
+      if (my_shift != local_range_data.first)
         {
-          for (unsigned int i = 1; i < n_procs; ++i)
-            if (first_index[i] == 0)
-              first_index[i] = first_index[i - 1];
-
-          // correct if our processor has a wrong local range
-          if (first_index[my_pid] != local_range_data.first)
-            {
-              Assert(local_range_data.first == local_range_data.second,
-                     ExcInternalError());
-              local_range_data.first = local_range_data.second =
-                first_index[my_pid];
-            }
+          const types::global_dof_index old_local_size = local_size();
+          local_range_data.first                       = my_shift;
+          local_range_data.second = my_shift + old_local_size;
         }
 
-      unsigned int n_ghost_targets = 0;
-      {
-        const auto index_owner =
-          Utilities::MPI::compute_index_owner(this->locally_owned_range_data,
-                                              this->ghost_indices_data,
-                                              this->communicator);
+      std::vector<unsigned int> owning_ranks_of_ghosts(
+        ghost_indices_data.n_elements());
+
+      // set up dictionary
+      internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
+        locally_owned_range_data,
+        ghost_indices_data,
+        communicator,
+        owning_ranks_of_ghosts,
+        /* track origins of ghosts */ true);
+
+      // read dictionary by communicating with the process who owns the index
+      // in the static partition (i.e. in the dictionary). This process
+      // returns the actual owner of the index.
+      ConsensusAlgorithmSelector<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>
+        consensus_algorithm(process, communicator);
+      consensus_algorithm.run();
 
-        ghost_targets_data.clear();
+      {
+        ghost_targets_data = {};
 
-        if (index_owner.size() > 0)
+        if (owning_ranks_of_ghosts.size() > 0)
           {
-            ghost_targets_data.emplace_back(index_owner[0], 0);
-            for (auto i : index_owner)
+            ghost_targets_data.emplace_back(owning_ranks_of_ghosts[0], 0);
+            for (auto i : owning_ranks_of_ghosts)
               {
                 Assert(i >= ghost_targets_data.back().first,
                        ExcInternalError(
-                         "Expect result of compute_index_owner to be sorted"));
+                         "Expect result of ConsensusAlgorithmProcess to be "
+                         "sorted"));
                 if (i == ghost_targets_data.back().first)
                   ghost_targets_data.back().second++;
                 else
                   ghost_targets_data.emplace_back(i, 1);
               }
           }
-
-        n_ghost_targets = ghost_targets_data.size();
-      }
-      // find the processes that want to import to me
-      {
-        std::vector<int> send_buffer(n_procs, 0);
-        std::vector<int> receive_buffer(n_procs, 0);
-        for (unsigned int i = 0; i < n_ghost_targets; i++)
-          send_buffer[ghost_targets_data[i].first] =
-            ghost_targets_data[i].second;
-
-        const int ierr = MPI_Alltoall(send_buffer.data(),
-                                      1,
-                                      MPI_INT,
-                                      receive_buffer.data(),
-                                      1,
-                                      MPI_INT,
-                                      communicator);
-        AssertThrowMPI(ierr);
-
-        // allocate memory for import data
-        std::vector<std::pair<unsigned int, unsigned int>> import_targets_temp;
-        n_import_indices_data = 0;
-        for (unsigned int i = 0; i < n_procs; i++)
-          if (receive_buffer[i] > 0)
-            {
-              n_import_indices_data += receive_buffer[i];
-              import_targets_temp.emplace_back(i, receive_buffer[i]);
-            }
-        // copy, don't move, to get deterministic memory usage.
-        import_targets_data = import_targets_temp;
       }
 
-      // now that we know how many indices each process will receive from
-      // ghosts, send and receive indices for import data. non-blocking receives
-      // and blocking sends
-      {
-        std::vector<types::global_dof_index> expanded_import_indices(
-          n_import_indices_data);
-        unsigned int             current_index_start = 0;
-        std::vector<MPI_Request> import_requests(import_targets_data.size() +
-                                                 n_ghost_targets);
-        for (unsigned int i = 0; i < import_targets_data.size(); i++)
+      // find out which processes request which of my locally owned indices
+      std::map<unsigned int, IndexSet> import_data = process.get_requesters();
+
+      // count import requests and set up the compressed indices
+      n_import_indices_data = 0;
+      import_targets_data   = {};
+      import_targets_data.reserve(import_data.size());
+      import_indices_chunks_by_rank_data = {};
+      import_indices_chunks_by_rank_data.reserve(import_data.size());
+      import_indices_chunks_by_rank_data.resize(1);
+      for (const auto &i : import_data)
+        if (i.second.n_elements() > 0)
           {
-            const int ierr =
-              MPI_Irecv(&expanded_import_indices[current_index_start],
-                        import_targets_data[i].second,
-                        DEAL_II_DOF_INDEX_MPI_TYPE,
-                        import_targets_data[i].first,
-                        import_targets_data[i].first,
-                        communicator,
-                        &import_requests[i]);
-            AssertThrowMPI(ierr);
-            current_index_start += import_targets_data[i].second;
+            import_targets_data.emplace_back(i.first, i.second.n_elements());
+            n_import_indices_data += i.second.n_elements();
+            import_indices_chunks_by_rank_data.push_back(
+              import_indices_chunks_by_rank_data.back() +
+              i.second.n_intervals());
           }
-        AssertDimension(current_index_start, n_import_indices_data);
 
-        // use non-blocking send for ghost indices stored in
-        // expanded_ghost_indices
-        std::vector<types::global_dof_index> expanded_ghost_indices;
-        if (n_ghost_indices_data > 0)
-          ghost_indices_data.fill_index_vector(expanded_ghost_indices);
+      // transform import indices to local index space
+      import_indices_data = {};
+      import_indices_data.reserve(import_indices_chunks_by_rank_data.back());
+      for (const auto &i : import_data)
+        {
+          Assert((i.second & locally_owned_range_data) == i.second,
+                 ExcInternalError("Requested indices must be in local range"));
+          for (auto interval = i.second.begin_intervals();
+               interval != i.second.end_intervals();
+               ++interval)
+            import_indices_data.emplace_back(*interval->begin() -
+                                               local_range_data.first,
+                                             interval->last() + 1 -
+                                               local_range_data.first);
+        }
 
-        current_index_start = 0;
-        for (unsigned int i = 0; i < n_ghost_targets; i++)
-          {
-            const int ierr =
-              MPI_Isend(&expanded_ghost_indices[current_index_start],
-                        ghost_targets_data[i].second,
-                        DEAL_II_DOF_INDEX_MPI_TYPE,
-                        ghost_targets_data[i].first,
-                        my_pid,
-                        communicator,
-                        &import_requests[import_targets_data.size() + i]);
-            AssertThrowMPI(ierr);
-            current_index_start += ghost_targets_data[i].second;
-          }
-        AssertDimension(current_index_start, n_ghost_indices_data);
+#  ifdef DEBUG
 
-        // wait for all import from other processes to be done
-        if (import_requests.size() > 0)
-          {
-            const int ierr = MPI_Waitall(import_requests.size(),
-                                         import_requests.data(),
-                                         MPI_STATUSES_IGNORE);
-            AssertThrowMPI(ierr);
-          }
+      // simple check: the number of processors to which we want to send
+      // ghost data and the number of processors our ghosts reference should
+      // be the same (when summed over all processors)
+      AssertDimension(
+        Utilities::MPI::sum(import_targets_data.size(), communicator),
+        Utilities::MPI::sum(ghost_targets_data.size(), communicator));
+
+      // simple check: the number of indices to exchange should match from the
+      // ghost indices side and the import indices side
+      AssertDimension(Utilities::MPI::sum(n_import_indices_data, communicator),
+                      Utilities::MPI::sum(n_ghost_indices_data, communicator));
+
+      // expensive check that the communication channel is sane -> do a ghost
+      // exchange step and see whether the ghost indices sent to us by other
+      // processes (ghost_indices) are the same as the ones we hold locally
+      // (ghost_indices_ref).
+      std::vector<types::global_dof_index> ghost_indices_ref;
+      ghost_indices_data.fill_index_vector(ghost_indices_ref);
+      AssertDimension(ghost_indices_ref.size(), n_ghost_indices());
+      std::vector<types::global_dof_index> indices_to_send(n_import_indices());
+      std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());
+      std::vector<types::global_dof_index> my_indices;
+      locally_owned_range_data.fill_index_vector(my_indices);
+      std::vector<MPI_Request> requests;
+      n_ghost_indices_in_larger_set = n_ghost_indices_data;
+      export_to_ghosted_array_start(127,
+                                    ArrayView<const types::global_dof_index>(
+                                      my_indices.data(), my_indices.size()),
+                                    make_array_view(indices_to_send),
+                                    make_array_view(ghost_indices),
+                                    requests);
+      export_to_ghosted_array_finish(make_array_view(ghost_indices), requests);
+      int       flag = 0;
+      const int ierr = MPI_Testall(requests.size(),
+                                   requests.data(),
+                                   &flag,
+                                   MPI_STATUSES_IGNORE);
+      AssertThrowMPI(ierr);
+      Assert(flag == 1,
+             ExcMessage(
+               "MPI found unfinished requests. Check communication setup"));
 
-        // transform import indices to local index space and compress
-        // contiguous indices in form of ranges
-        {
-          import_indices_chunks_by_rank_data.resize(import_targets_data.size() +
-                                                    1);
-          import_indices_chunks_by_rank_data[0] = 0;
-          // a vector which stores import indices as ranges [a_i,b_i)
-          std::vector<std::pair<unsigned int, unsigned int>>
-                       compressed_import_indices;
-          unsigned int shift = 0;
-          for (unsigned int p = 0; p < import_targets_data.size(); ++p)
-            {
-              types::global_dof_index last_index =
-                numbers::invalid_dof_index - 1;
-              for (unsigned int ii = 0; ii < import_targets_data[p].second;
-                   ++ii)
-                {
-                  // index in expanded_import_indices for a pair (p,ii):
-                  const unsigned int i = shift + ii;
-                  Assert(expanded_import_indices[i] >= local_range_data.first &&
-                           expanded_import_indices[i] < local_range_data.second,
-                         ExcIndexRange(expanded_import_indices[i],
-                                       local_range_data.first,
-                                       local_range_data.second));
-                  // local index starting from the beginning of locally owned
-                  // DoFs:
-                  types::global_dof_index new_index =
-                    (expanded_import_indices[i] - local_range_data.first);
-                  Assert(new_index < numbers::invalid_unsigned_int,
-                         ExcNotImplemented());
-                  if (new_index == last_index + 1)
-                    // if contiguous, increment the end of last range:
-                    compressed_import_indices.back().second++;
-                  else
-                    // otherwise start a new range:
-                    compressed_import_indices.emplace_back(new_index,
-                                                           new_index + 1);
-                  last_index = new_index;
-                }
-              shift += import_targets_data[p].second;
-              import_indices_chunks_by_rank_data[p + 1] =
-                compressed_import_indices.size();
-            }
-          import_indices_data = compressed_import_indices;
+      for (unsigned int i = 0; i < ghost_indices.size(); ++i)
+        AssertDimension(ghost_indices[i], ghost_indices_ref[i]);
 
-          // sanity check
-#  ifdef DEBUG
-          const types::global_dof_index n_local_dofs =
-            local_range_data.second - local_range_data.first;
-          for (const auto &range : import_indices_data)
-            {
-              AssertIndexRange(range.first, n_local_dofs);
-              AssertIndexRange(range.second - 1, n_local_dofs);
-            }
 #  endif
-        }
-      }
+
 #endif // #ifdef DEAL_II_WITH_MPI
 
       if (larger_ghost_index_set.size() == 0)
@@ -533,7 +473,7 @@ namespace Utilities
       return memory;
     }
 
-  } // end of namespace MPI
+  } // namespace MPI
 
 } // end of namespace Utilities
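
In set_ghost_indices() above, the MPI_Bcast plus MPI_Allgather of all range boundaries is replaced by a single MPI_Exscan, which hands every rank the sum of the local sizes of all lower ranks, i.e., the first index of its own range (the deal.II code additionally folds a possible non-zero start index of rank 0 into that rank's contribution). A minimal stand-alone sketch of this idea:

#include <mpi.h>

#include <cstdint>

// Compute the first global index of this rank's locally owned range as the
// exclusive prefix sum of the local sizes. A sketch, not the deal.II code.
std::uint64_t
compute_local_start(const std::uint64_t local_size, const MPI_Comm comm)
{
  std::uint64_t shift = 0;
  MPI_Exscan(&local_size, &shift, 1, MPI_UINT64_T, MPI_SUM, comm);

  // the MPI standard leaves the receive buffer of MPI_Exscan undefined on
  // rank 0, so set it explicitly
  int rank = 0;
  MPI_Comm_rank(comm, &rank);
  if (rank == 0)
    shift = 0;

  return shift;
}
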
 
