https://gitweb.dealii.org/ - dealii.git/commitdiff
Move ConsensusAlgorithmsPayload into a source file.
author David Wells <drwells@email.unc.edu>
Fri, 18 Feb 2022 15:53:33 +0000 (10:53 -0500)
committer David Wells <drwells@email.unc.edu>
Fri, 18 Feb 2022 16:32:58 +0000 (11:32 -0500)
include/deal.II/base/mpi_compute_index_owner_internal.h
source/base/mpi_compute_index_owner_internal.cc

index 5ff32c4d98e41bdba6e1a5e954a39064a9e2c153..21eebfcfe7747e3f57de6f0d014aaaf69867c6db 100644 (file)
@@ -279,18 +279,7 @@ namespace Utilities
                                      const IndexSet &indices_to_look_up,
                                      const MPI_Comm &comm,
                                      std::vector<unsigned int> &owning_ranks,
-                                     const bool track_index_requests = false)
-            : owned_indices(owned_indices)
-            , indices_to_look_up(indices_to_look_up)
-            , comm(comm)
-            , my_rank(this_mpi_process(comm))
-            , n_procs(n_mpi_processes(comm))
-            , track_index_requests(track_index_requests)
-            , owning_ranks(owning_ranks)
-          {
-            dict.reinit(owned_indices, comm);
-            requesters.resize(dict.actually_owning_rank_list.size());
-          }
+                                     const bool track_index_requests = false);
 
           /**
            * The index space which describes the locally owned space.
@@ -377,78 +366,14 @@ namespace Utilities
             const unsigned int                                     other_rank,
             const std::vector<std::pair<types::global_dof_index,
                                         types::global_dof_index>> &buffer_recv,
-            std::vector<unsigned int> &request_buffer) override
-          {
-            unsigned int owner_index = 0;
-            for (const auto &interval : buffer_recv)
-              for (auto i = interval.first; i < interval.second; ++i)
-                {
-                  const unsigned int actual_owner =
-                    dict.actually_owning_ranks[i - dict.local_range.first];
-                  request_buffer.push_back(actual_owner);
-
-                  if (track_index_requests)
-                    append_index_origin(i, owner_index, other_rank);
-                }
-          }
+            std::vector<unsigned int> &request_buffer) override;
 
           /**
            * Implementation of
            * Utilities::MPI::ConsensusAlgorithms::Process::compute_targets().
            */
           virtual std::vector<unsigned int>
-          compute_targets() override
-          {
-            std::vector<unsigned int> targets;
-
-            // 1) collect relevant processes and process local dict entries
-            {
-              unsigned int index       = 0;
-              unsigned int owner_index = 0;
-              for (auto i : indices_to_look_up)
-                {
-                  unsigned int other_rank = dict.dof_to_dict_rank(i);
-                  if (other_rank == my_rank)
-                    {
-                      owning_ranks[index] =
-                        dict.actually_owning_ranks[i - dict.local_range.first];
-                      if (track_index_requests)
-                        append_index_origin(i, owner_index, my_rank);
-                    }
-                  else if (targets.empty() || targets.back() != other_rank)
-                    targets.push_back(other_rank);
-                  index++;
-                }
-            }
-
-
-            for (auto i : targets)
-              {
-                recv_indices[i]                    = {};
-                indices_to_look_up_by_dict_rank[i] = {};
-              }
-
-            // 3) collect indices for each process
-            {
-              unsigned int index = 0;
-              for (auto i : indices_to_look_up)
-                {
-                  unsigned int other_rank = dict.dof_to_dict_rank(i);
-                  if (other_rank != my_rank)
-                    {
-                      recv_indices[other_rank].push_back(index);
-                      indices_to_look_up_by_dict_rank[other_rank].push_back(i);
-                    }
-                  index++;
-                }
-            }
-
-            Assert(targets.size() == recv_indices.size() &&
-                     targets.size() == indices_to_look_up_by_dict_rank.size(),
-                   ExcMessage("Size does not match!"));
-
-            return targets;
-          }
+          compute_targets() override;
 
           /**
            * Implementation of
@@ -458,20 +383,7 @@ namespace Utilities
           create_request(const unsigned int other_rank,
                          std::vector<std::pair<types::global_dof_index,
                                                types::global_dof_index>>
-                           &send_buffer) override
-          {
-            // create index set and compress data to be sent
-            auto    &indices_i = indices_to_look_up_by_dict_rank[other_rank];
-            IndexSet is(dict.size);
-            is.add_indices(indices_i.begin(), indices_i.end());
-            is.compress();
-
-            for (auto interval = is.begin_intervals();
-                 interval != is.end_intervals();
-                 ++interval)
-              send_buffer.emplace_back(*interval->begin(),
-                                       interval->last() + 1);
-          }
+                           &send_buffer) override;
 
           /**
            * Implementation of
@@ -479,16 +391,7 @@ namespace Utilities
            */
           virtual void
           read_answer(const unsigned int               other_rank,
-                      const std::vector<unsigned int> &recv_buffer) override
-          {
-            Assert(recv_buffer.size() == recv_indices[other_rank].size(),
-                   ExcInternalError());
-            Assert(recv_indices[other_rank].size() == recv_buffer.size(),
-                   ExcMessage("Sizes do not match!"));
-
-            for (unsigned int j = 0; j < recv_indices[other_rank].size(); ++j)
-              owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
-          }
+                      const std::vector<unsigned int> &recv_buffer) override;
 
           /**
            * Resolve the origin of the requests by sending the information
@@ -500,163 +403,7 @@ namespace Utilities
            *         are requested from the current rank
            */
           std::map<unsigned int, IndexSet>
-          get_requesters()
-          {
-            Assert(track_index_requests,
-                   ExcMessage("Must enable index range tracking in "
-                              "constructor of ConsensusAlgorithmProcess"));
-
-            std::map<unsigned int, dealii::IndexSet> requested_indices;
-
-#ifdef DEAL_II_WITH_MPI
-
-            static CollectiveMutex      mutex;
-            CollectiveMutex::ScopedLock lock(mutex, comm);
-
-            const int mpi_tag = Utilities::MPI::internal::Tags::
-              consensus_algorithm_payload_get_requesters;
-
-            // reserve enough slots for the requests ahead; depending on
-            // whether the owning rank is one of the requesters or not, we
-            // might have one fewer request to execute, so fill the requests
-            // on demand.
-            std::vector<MPI_Request> send_requests;
-            send_requests.reserve(requesters.size());
-
-            // We use an integer vector for the data exchange. Since we send
-            // data associated with intervals from different requesters, we will
-            // need to send (a) the MPI rank of the requester, (b) the number
-            // of intervals directed to this requester, and (c) a list of
-            // intervals, i.e., two integers per interval. The number of items
-            // sent in total can be deduced both via the MPI status message at
-            // the receiver side and by counting the buckets from
-            // different requesters.
-            std::vector<std::vector<unsigned int>> send_data(requesters.size());
-            for (unsigned int i = 0; i < requesters.size(); ++i)
-              {
-                // special code for our own indices
-                if (dict.actually_owning_rank_list[i] == my_rank)
-                  {
-                    for (const auto &j : requesters[i])
-                      {
-                        const types::global_dof_index index_offset =
-                          dict.get_index_offset(my_rank);
-                        IndexSet &my_index_set = requested_indices[j.first];
-                        my_index_set.set_size(owned_indices.size());
-                        for (const auto &interval : j.second)
-                          my_index_set.add_range(index_offset + interval.first,
-                                                 index_offset +
-                                                   interval.second);
-                      }
-                  }
-                else
-                  {
-                    for (const auto &j : requesters[i])
-                      {
-                        send_data[i].push_back(j.first);
-                        send_data[i].push_back(j.second.size());
-                        for (const auto &interval : j.second)
-                          {
-                            send_data[i].push_back(interval.first);
-                            send_data[i].push_back(interval.second);
-                          }
-                      }
-                    send_requests.push_back(MPI_Request());
-                    const int ierr =
-                      MPI_Isend(send_data[i].data(),
-                                send_data[i].size(),
-                                MPI_UNSIGNED,
-                                dict.actually_owning_rank_list[i],
-                                mpi_tag,
-                                comm,
-                                &send_requests.back());
-                    AssertThrowMPI(ierr);
-                  }
-              }
-
-            // receive the data
-            for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices;
-                 ++c)
-              {
-                // wait for an incoming message
-                MPI_Status status;
-                int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
-                AssertThrowMPI(ierr);
-
-                // retrieve size of incoming message
-                int number_amount;
-                ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
-                AssertThrowMPI(ierr);
-
-                // receive message
-                Assert(number_amount % 2 == 0, ExcInternalError());
-                std::vector<std::pair<unsigned int, unsigned int>> buffer(
-                  number_amount / 2);
-                ierr = MPI_Recv(buffer.data(),
-                                number_amount,
-                                MPI_UNSIGNED,
-                                status.MPI_SOURCE,
-                                status.MPI_TAG,
-                                comm,
-                                &status);
-                AssertThrowMPI(ierr);
-
-                // unpack the message and translate the dictionary-local
-                // indices coming via MPI to the global index range
-                const types::global_dof_index index_offset =
-                  dict.get_index_offset(status.MPI_SOURCE);
-                unsigned int offset = 0;
-                while (offset < buffer.size())
-                  {
-                    AssertIndexRange(offset + buffer[offset].second,
-                                     buffer.size());
-
-                    IndexSet my_index_set(owned_indices.size());
-                    for (unsigned int i = offset + 1;
-                         i < offset + buffer[offset].second + 1;
-                         ++i)
-                      my_index_set.add_range(index_offset + buffer[i].first,
-                                             index_offset + buffer[i].second);
-
-                    // the underlying index set is able to merge ranges coming
-                    // from different ranks due to the partitioning in the
-                    // dictionary
-                    IndexSet &index_set =
-                      requested_indices[buffer[offset].first];
-                    if (index_set.size() == 0)
-                      index_set.set_size(owned_indices.size());
-                    index_set.add_indices(my_index_set);
-
-                    offset += buffer[offset].second + 1;
-                  }
-                AssertDimension(offset, buffer.size());
-              }
-
-            if (send_requests.size() > 0)
-              {
-                const auto ierr = MPI_Waitall(send_requests.size(),
-                                              send_requests.data(),
-                                              MPI_STATUSES_IGNORE);
-                AssertThrowMPI(ierr);
-              }
-
-
-#  ifdef DEBUG
-            for (const auto &it : requested_indices)
-              {
-                IndexSet copy_set = it.second;
-                copy_set.subtract_set(owned_indices);
-                Assert(copy_set.n_elements() == 0,
-                       ExcInternalError(
-                         "The indices requested from the current "
-                         "MPI rank should be locally owned here!"));
-              }
-#  endif
-
-#endif // DEAL_II_WITH_MPI
-
-            return requested_indices;
-          }
+          get_requesters();
 
         private:
           /**
@@ -674,31 +421,8 @@ namespace Utilities
            */
           void
           append_index_origin(const types::global_dof_index index,
-                              unsigned int                 &owner_index,
-                              const unsigned int            rank_of_request)
-          {
-            // remember who requested which index. We want to use an
-            // std::vector with simple addressing, via a good guess from the
-            // preceding index, rather than std::map, because this is an inner
-            // loop and it avoids the map lookup in every iteration
-            const unsigned int rank_of_owner =
-              dict.actually_owning_ranks[index - dict.local_range.first];
-            owner_index =
-              dict.get_owning_rank_index(rank_of_owner, owner_index);
-            if (requesters[owner_index].empty() ||
-                requesters[owner_index].back().first != rank_of_request)
-              requesters[owner_index].emplace_back(
-                rank_of_request,
-                std::vector<std::pair<unsigned int, unsigned int>>());
-            if (requesters[owner_index].back().second.empty() ||
-                requesters[owner_index].back().second.back().second !=
-                  index - dict.local_range.first)
-              requesters[owner_index].back().second.emplace_back(
-                index - dict.local_range.first,
-                index - dict.local_range.first + 1);
-            else
-              ++requesters[owner_index].back().second.back().second;
-          }
+                              unsigned int &                owner_index,
+                              const unsigned int            rank_of_request);
         };
 
       } // namespace ComputeIndexOwner
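
In the new layout the header only declares these member functions; their definitions move to the source file shown next. One detail worth noting is how create_request() packs the indices requested from a given dictionary rank: it builds an IndexSet and then emits half-open intervals [first, last + 1). Purely as a standalone illustration of that compression step (standard library only, no dealii::IndexSet; the helper name compress_to_intervals is invented for this sketch), it could look like:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

using global_index = std::uint64_t;

// Turn an arbitrary set of indices into sorted, disjoint half-open
// intervals [first, last + 1), analogous to what IndexSet::add_indices()
// followed by iteration over begin_intervals()/end_intervals() yields.
std::vector<std::pair<global_index, global_index>>
compress_to_intervals(std::vector<global_index> indices)
{
  std::sort(indices.begin(), indices.end());
  indices.erase(std::unique(indices.begin(), indices.end()), indices.end());

  std::vector<std::pair<global_index, global_index>> intervals;
  for (const global_index i : indices)
    if (!intervals.empty() && intervals.back().second == i)
      ++intervals.back().second;        // extend the current interval by one
    else
      intervals.emplace_back(i, i + 1); // start a new interval
  return intervals;
}

int main()
{
  // {5, 3, 4, 9, 12, 11} becomes [3,6) [9,10) [11,13)
  for (const auto &interval : compress_to_intervals({5, 3, 4, 9, 12, 11}))
    std::cout << '[' << interval.first << ',' << interval.second << ")\n";
}
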
index e6bc565c03237eccf84889f14d936ae8d34bdc1e..159e746234f33170ff1448350d26b7d503607134 100644 (file)
@@ -368,6 +368,324 @@ namespace Utilities
         }
 
 
+        ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
+          const IndexSet &           owned_indices,
+          const IndexSet &           indices_to_look_up,
+          const MPI_Comm &           comm,
+          std::vector<unsigned int> &owning_ranks,
+          const bool                 track_index_requests)
+          : owned_indices(owned_indices)
+          , indices_to_look_up(indices_to_look_up)
+          , comm(comm)
+          , my_rank(this_mpi_process(comm))
+          , n_procs(n_mpi_processes(comm))
+          , track_index_requests(track_index_requests)
+          , owning_ranks(owning_ranks)
+        {
+          dict.reinit(owned_indices, comm);
+          requesters.resize(dict.actually_owning_rank_list.size());
+        }
+
+
+
+        void
+        ConsensusAlgorithmsPayload::answer_request(
+          const unsigned int                                     other_rank,
+          const std::vector<std::pair<types::global_dof_index,
+                                      types::global_dof_index>> &buffer_recv,
+          std::vector<unsigned int> &                            request_buffer)
+        {
+          unsigned int owner_index = 0;
+          for (const auto &interval : buffer_recv)
+            for (auto i = interval.first; i < interval.second; ++i)
+              {
+                const unsigned int actual_owner =
+                  dict.actually_owning_ranks[i - dict.local_range.first];
+                request_buffer.push_back(actual_owner);
+
+                if (track_index_requests)
+                  append_index_origin(i, owner_index, other_rank);
+              }
+        }
+
+
+
+        std::vector<unsigned int>
+        ConsensusAlgorithmsPayload::compute_targets()
+        {
+          std::vector<unsigned int> targets;
+
+          // 1) collect relevant processes and process local dict entries
+          {
+            unsigned int index       = 0;
+            unsigned int owner_index = 0;
+            for (auto i : indices_to_look_up)
+              {
+                unsigned int other_rank = dict.dof_to_dict_rank(i);
+                if (other_rank == my_rank)
+                  {
+                    owning_ranks[index] =
+                      dict.actually_owning_ranks[i - dict.local_range.first];
+                    if (track_index_requests)
+                      append_index_origin(i, owner_index, my_rank);
+                  }
+                else if (targets.empty() || targets.back() != other_rank)
+                  targets.push_back(other_rank);
+                index++;
+              }
+          }
+
+
+          for (auto i : targets)
+            {
+              recv_indices[i]                    = {};
+              indices_to_look_up_by_dict_rank[i] = {};
+            }
+
+          // 3) collect indices for each process
+          {
+            unsigned int index = 0;
+            for (auto i : indices_to_look_up)
+              {
+                unsigned int other_rank = dict.dof_to_dict_rank(i);
+                if (other_rank != my_rank)
+                  {
+                    recv_indices[other_rank].push_back(index);
+                    indices_to_look_up_by_dict_rank[other_rank].push_back(i);
+                  }
+                index++;
+              }
+          }
+
+          Assert(targets.size() == recv_indices.size() &&
+                   targets.size() == indices_to_look_up_by_dict_rank.size(),
+                 ExcMessage("Size does not match!"));
+
+          return targets;
+        }
+
+
+
+        void
+        ConsensusAlgorithmsPayload::create_request(
+          const unsigned int                               other_rank,
+          std::vector<std::pair<types::global_dof_index,
+                                types::global_dof_index>> &send_buffer)
+        {
+          // create index set and compress data to be sent
+          auto &   indices_i = indices_to_look_up_by_dict_rank[other_rank];
+          IndexSet is(dict.size);
+          is.add_indices(indices_i.begin(), indices_i.end());
+          is.compress();
+
+          for (auto interval = is.begin_intervals();
+               interval != is.end_intervals();
+               ++interval)
+            send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
+        }
+
+
+
+        void
+        ConsensusAlgorithmsPayload::read_answer(
+          const unsigned int               other_rank,
+          const std::vector<unsigned int> &recv_buffer)
+        {
+          Assert(recv_buffer.size() == recv_indices[other_rank].size(),
+                 ExcInternalError());
+          Assert(recv_indices[other_rank].size() == recv_buffer.size(),
+                 ExcMessage("Sizes do not match!"));
+
+          for (unsigned int j = 0; j < recv_indices[other_rank].size(); ++j)
+            owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
+        }
+
+
+
+        std::map<unsigned int, IndexSet>
+        ConsensusAlgorithmsPayload::get_requesters()
+        {
+          Assert(track_index_requests,
+                 ExcMessage("Must enable index range tracking in "
+                            "constructor of ConsensusAlgorithmProcess"));
+
+          std::map<unsigned int, dealii::IndexSet> requested_indices;
+
+#ifdef DEAL_II_WITH_MPI
+
+          static CollectiveMutex      mutex;
+          CollectiveMutex::ScopedLock lock(mutex, comm);
+
+          const int mpi_tag = Utilities::MPI::internal::Tags::
+            consensus_algorithm_payload_get_requesters;
+
+          // reserve enough slots for the requests ahead; depending on
+          // whether the owning rank is one of the requesters or not, we
+          // might have one fewer request to execute, so fill the requests
+          // on demand.
+          std::vector<MPI_Request> send_requests;
+          send_requests.reserve(requesters.size());
+
+          // We use an integer vector for the data exchange. Since we send
+          // data associated with intervals from different requesters, we will
+          // need to send (a) the MPI rank of the requester, (b) the number
+          // of intervals directed to this requester, and (c) a list of
+          // intervals, i.e., two integers per interval. The number of items
+          // sent in total can be deduced both via the MPI status message at
+          // the receiver side and by counting the buckets from
+          // different requesters.
+          std::vector<std::vector<unsigned int>> send_data(requesters.size());
+          for (unsigned int i = 0; i < requesters.size(); ++i)
+            {
+              // special code for our own indices
+              if (dict.actually_owning_rank_list[i] == my_rank)
+                {
+                  for (const auto &j : requesters[i])
+                    {
+                      const types::global_dof_index index_offset =
+                        dict.get_index_offset(my_rank);
+                      IndexSet &my_index_set = requested_indices[j.first];
+                      my_index_set.set_size(owned_indices.size());
+                      for (const auto &interval : j.second)
+                        my_index_set.add_range(index_offset + interval.first,
+                                               index_offset + interval.second);
+                    }
+                }
+              else
+                {
+                  for (const auto &j : requesters[i])
+                    {
+                      send_data[i].push_back(j.first);
+                      send_data[i].push_back(j.second.size());
+                      for (const auto &interval : j.second)
+                        {
+                          send_data[i].push_back(interval.first);
+                          send_data[i].push_back(interval.second);
+                        }
+                    }
+                  send_requests.push_back(MPI_Request());
+                  const int ierr = MPI_Isend(send_data[i].data(),
+                                             send_data[i].size(),
+                                             MPI_UNSIGNED,
+                                             dict.actually_owning_rank_list[i],
+                                             mpi_tag,
+                                             comm,
+                                             &send_requests.back());
+                  AssertThrowMPI(ierr);
+                }
+            }
+
+          // receive the data
+          for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
+            {
+              // wait for an incoming message
+              MPI_Status status;
+              int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
+              AssertThrowMPI(ierr);
+
+              // retrieve size of incoming message
+              int number_amount;
+              ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
+              AssertThrowMPI(ierr);
+
+              // receive message
+              Assert(number_amount % 2 == 0, ExcInternalError());
+              std::vector<std::pair<unsigned int, unsigned int>> buffer(
+                number_amount / 2);
+              ierr = MPI_Recv(buffer.data(),
+                              number_amount,
+                              MPI_UNSIGNED,
+                              status.MPI_SOURCE,
+                              status.MPI_TAG,
+                              comm,
+                              &status);
+              AssertThrowMPI(ierr);
+
+              // unpack the message and translate the dictionary-local
+              // indices coming via MPI to the global index range
+              const types::global_dof_index index_offset =
+                dict.get_index_offset(status.MPI_SOURCE);
+              unsigned int offset = 0;
+              while (offset < buffer.size())
+                {
+                  AssertIndexRange(offset + buffer[offset].second,
+                                   buffer.size());
+
+                  IndexSet my_index_set(owned_indices.size());
+                  for (unsigned int i = offset + 1;
+                       i < offset + buffer[offset].second + 1;
+                       ++i)
+                    my_index_set.add_range(index_offset + buffer[i].first,
+                                           index_offset + buffer[i].second);
+
+                  // the underlying index set is able to merge ranges coming
+                  // from different ranks due to the partitioning in the
+                  // dictionary
+                  IndexSet &index_set = requested_indices[buffer[offset].first];
+                  if (index_set.size() == 0)
+                    index_set.set_size(owned_indices.size());
+                  index_set.add_indices(my_index_set);
+
+                  offset += buffer[offset].second + 1;
+                }
+              AssertDimension(offset, buffer.size());
+            }
+
+          if (send_requests.size() > 0)
+            {
+              const auto ierr = MPI_Waitall(send_requests.size(),
+                                            send_requests.data(),
+                                            MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+
+#  ifdef DEBUG
+          for (const auto &it : requested_indices)
+            {
+              IndexSet copy_set = it.second;
+              copy_set.subtract_set(owned_indices);
+              Assert(copy_set.n_elements() == 0,
+                     ExcInternalError(
+                       "The indices requested from the current "
+                       "MPI rank should be locally owned here!"));
+            }
+#  endif
+
+#endif // DEAL_II_WITH_MPI
+
+          return requested_indices;
+        }
+
+
+
+        void
+        ConsensusAlgorithmsPayload::append_index_origin(
+          const types::global_dof_index index,
+          unsigned int &                owner_index,
+          const unsigned int            rank_of_request)
+        {
+          // remember who requested which index. We want to use an
+          // std::vector with simple addressing, via a good guess from the
+          // preceding index, rather than std::map, because this is an inner
+          // loop and it avoids the map lookup in every iteration
+          const unsigned int rank_of_owner =
+            dict.actually_owning_ranks[index - dict.local_range.first];
+          owner_index = dict.get_owning_rank_index(rank_of_owner, owner_index);
+          if (requesters[owner_index].empty() ||
+              requesters[owner_index].back().first != rank_of_request)
+            requesters[owner_index].emplace_back(
+              rank_of_request,
+              std::vector<std::pair<unsigned int, unsigned int>>());
+          if (requesters[owner_index].back().second.empty() ||
+              requesters[owner_index].back().second.back().second !=
+                index - dict.local_range.first)
+            requesters[owner_index].back().second.emplace_back(
+              index - dict.local_range.first,
+              index - dict.local_range.first + 1);
+          else
+            ++requesters[owner_index].back().second.back().second;
+        }
       } // namespace ComputeIndexOwner
     }   // namespace internal
   }     // namespace MPI
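
The long comment inside get_requesters() documents the wire format used to ship requester information back to each owning rank: for every requester the buffer carries (a) its MPI rank, (b) the number of intervals directed at it, and (c) the intervals themselves as pairs of dictionary-local indices. Purely as an illustration of that layout (no MPI, plain standard containers; the helper names pack_requesters and unpack_requesters are hypothetical), a pack/unpack round trip could be sketched like this:

#include <cstddef>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

// One record per requester: rank, number of intervals, then the intervals
// as (begin, end) pairs of dictionary-local indices. This mirrors the flat
// unsigned-int buffer assembled in get_requesters().
using Interval = std::pair<unsigned int, unsigned int>;         // [begin, end)
using Requests = std::map<unsigned int, std::vector<Interval>>; // rank -> intervals

std::vector<unsigned int>
pack_requesters(const Requests &requests)
{
  std::vector<unsigned int> buffer;
  for (const auto &entry : requests)
    {
      buffer.push_back(entry.first);                                    // (a) requester rank
      buffer.push_back(static_cast<unsigned int>(entry.second.size())); // (b) interval count
      for (const auto &interval : entry.second)                         // (c) the intervals
        {
          buffer.push_back(interval.first);
          buffer.push_back(interval.second);
        }
    }
  return buffer;
}

Requests
unpack_requesters(const std::vector<unsigned int> &buffer)
{
  Requests    requests;
  std::size_t offset = 0;
  while (offset < buffer.size())
    {
      const unsigned int rank        = buffer[offset];
      const unsigned int n_intervals = buffer[offset + 1];
      offset += 2;
      for (unsigned int k = 0; k < n_intervals; ++k, offset += 2)
        requests[rank].emplace_back(buffer[offset], buffer[offset + 1]);
    }
  return requests;
}

int main()
{
  const Requests original = {{2, {{0, 3}, {7, 8}}}, {5, {{10, 12}}}};
  const std::vector<unsigned int> buffer = pack_requesters(original);
  std::cout << (unpack_requesters(buffer) == original ? "round trip ok" : "mismatch") << '\n';
}

The real implementation interleaves this packing with MPI_Isend/MPI_Probe/MPI_Recv and translates the dictionary-local intervals back into the global index range via dict.get_index_offset().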
