From 98b983a535e7983296f27725165ec1ff9ecba76d Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Tue, 17 Sep 2019 09:06:15 +0200
Subject: [PATCH] Move ComputeIndexOwner details into separate header file

---
 .../base/mpi_compute_index_owner_internal.h   | 359 ++++++++++++++++++
 source/base/mpi.cc                            | 324 +----------------
 2 files changed, 362 insertions(+), 321 deletions(-)
 create mode 100644 include/deal.II/base/mpi_compute_index_owner_internal.h

diff --git a/include/deal.II/base/mpi_compute_index_owner_internal.h b/include/deal.II/base/mpi_compute_index_owner_internal.h
new file mode 100644
index 0000000000..1244873a78
--- /dev/null
+++ b/include/deal.II/base/mpi_compute_index_owner_internal.h
@@ -0,0 +1,359 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_base_mpi_compute_index_owner_internal_h
+#define dealii_base_mpi_compute_index_owner_internal_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/mpi.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+  namespace MPI
+  {
+    namespace internal
+    {
+      /**
+       * An internal namespace used for Utilities::MPI::compute_index_owner().
+       */
+      namespace ComputeIndexOwner
+      {
+        struct Dictionary
+        {
+          static const unsigned int tag_setup = 11;
+
+          std::vector<unsigned int> actually_owning_ranks;
+
+          types::global_dof_index dofs_per_process;
+          std::pair<types::global_dof_index, types::global_dof_index>
+            local_range;
+          types::global_dof_index local_size;
+          types::global_dof_index size;
+
+          void
+          reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
+          {
+            this->partition(owned_indices, comm);
+
+#ifdef DEAL_II_WITH_MPI
+            unsigned int my_rank = this_mpi_process(comm);
+
+            types::global_dof_index dic_local_received = 0;
+            std::map<unsigned int, unsigned int> relevant_procs_map;
+
+            // 2) collect relevant processes and process local dict entries
+            {
+              std::vector<unsigned int> relevant_procs;
+              for (auto i : owned_indices)
+                {
+                  unsigned int other_rank = this->dof_to_dict_rank(i);
+                  if (other_rank == my_rank)
+                    {
+                      this->actually_owning_ranks[i - this->local_range.first] =
+                        my_rank;
+                      dic_local_received++;
+                    }
+                  else if (relevant_procs.empty() ||
+                           relevant_procs.back() != other_rank)
+                    relevant_procs.push_back(other_rank);
+                }
+
+              {
+                unsigned int c = 0;
+                for (auto i : relevant_procs)
+                  relevant_procs_map[i] = c++;
+              }
+            }
+
+            const unsigned int n_relevant_procs = relevant_procs_map.size();
+            std::vector<std::vector<
+              std::pair<types::global_dof_index, types::global_dof_index>>>
+              buffers(n_relevant_procs);
+            std::vector<MPI_Request> request(n_relevant_procs);
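+
+            // Note on the wire format used below: every message on the
+            // tag_setup channel is a flat array of entries of type
+            // DEAL_II_DOF_INDEX_MPI_TYPE that encodes half-open index
+            // ranges as pairs [first, last+1), so a valid message always
+            // contains an even number of entries.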
+
+            // 3) send messages with local dofs to the right dict process
+            {
+              std::vector<std::vector<types::global_dof_index>> temp(
+                n_relevant_procs);
+
+              // collect dofs of each dict process
+              for (auto i : owned_indices)
+                {
+                  unsigned int other_rank = this->dof_to_dict_rank(i);
+                  if (other_rank != my_rank)
+                    temp[relevant_procs_map[other_rank]].push_back(i);
+                }
+
+              // send dofs to each process
+              for (auto rank_pair : relevant_procs_map)
+                {
+                  const int rank  = rank_pair.first;
+                  const int index = rank_pair.second;
+
+                  // create index set and compress data to be sent
+                  auto &   indices_i = temp[index];
+                  IndexSet is(this->size);
+                  is.add_indices(indices_i.begin(), indices_i.end());
+                  is.compress();
+
+                  // translate index set to a list of pairs
+                  auto &buffer = buffers[index];
+                  for (auto interval = is.begin_intervals();
+                       interval != is.end_intervals();
+                       interval++)
+                    buffer.emplace_back(*interval->begin(),
+                                        interval->last() + 1);
+
+                  // send data
+                  const auto ierr = MPI_Isend(buffer.data(),
+                                              buffer.size() * 2,
+                                              DEAL_II_DOF_INDEX_MPI_TYPE,
+                                              rank,
+                                              tag_setup,
+                                              comm,
+                                              &request[index]);
+                  AssertThrowMPI(ierr);
+                }
+            }
+
+
+            // 4) receive messages until all dofs in dict are processed
+            while (this->local_size != dic_local_received)
+              {
+                // wait for an incoming message
+                MPI_Status status;
+                auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_setup, comm, &status);
+                AssertThrowMPI(ierr);
+
+                // retrieve size of incoming message
+                int number_amount;
+                ierr = MPI_Get_count(&status,
+                                     DEAL_II_DOF_INDEX_MPI_TYPE,
+                                     &number_amount);
+                AssertThrowMPI(ierr);
+
+                const auto other_rank = status.MPI_SOURCE;
+
+                // receive message
+                Assert(number_amount % 2 == 0, ExcInternalError());
+                std::vector<
+                  std::pair<types::global_dof_index, types::global_dof_index>>
+                  buffer(number_amount / 2);
+                ierr = MPI_Recv(buffer.data(),
+                                number_amount,
+                                DEAL_II_DOF_INDEX_MPI_TYPE,
+                                other_rank,
+                                tag_setup,
+                                comm,
+                                &status);
+                AssertThrowMPI(ierr);
+
+                // process message: loop over all intervals
+                for (auto interval : buffer)
+                  for (types::global_dof_index i = interval.first;
+                       i < interval.second;
+                       i++)
+                    {
+                      this->actually_owning_ranks[i - this->local_range.first] =
+                        other_rank;
+                      dic_local_received++;
+                    }
+              }
+
+            // 5) make sure that all messages have been sent
+            const auto ierr = MPI_Waitall(n_relevant_procs,
+                                          request.data(),
+                                          MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
+#else
+            (void)owned_indices;
+            (void)comm;
+#endif
+          }
+
+          unsigned int
+          dof_to_dict_rank(const types::global_dof_index i)
+          {
+            return i / dofs_per_process;
+          }
+
+        private:
+          void
+          partition(const IndexSet &owned_indices, const MPI_Comm &comm)
+          {
+#ifdef DEAL_II_WITH_MPI
+            const unsigned int n_procs = n_mpi_processes(comm);
+            const unsigned int my_rank = this_mpi_process(comm);
+
+            size             = owned_indices.size();
+            dofs_per_process = (size + n_procs - 1) / n_procs;
+            local_range.first  = std::min(dofs_per_process * my_rank, size);
+            local_range.second =
+              std::min(dofs_per_process * (my_rank + 1), size);
+            local_size = local_range.second - local_range.first;
+
+            actually_owning_ranks.resize(local_size);
+#else
+            (void)owned_indices;
+            (void)comm;
+#endif
+          }
+        };
+
+        class ConsensusAlgorithmProcess
+          : public dealii::Utilities::MPI::
+              ConsensusAlgorithmProcess<types::global_dof_index, unsigned int>
+        {
+        public:
+          ConsensusAlgorithmProcess(const IndexSet &           owned_indices,
+                                    const IndexSet &           indices_to_look_up,
+                                    const MPI_Comm &           comm,
+                                    std::vector<unsigned int> &owning_ranks)
+            : owned_indices(owned_indices)
+            , indices_to_look_up(indices_to_look_up)
+            , comm(comm)
+            , my_rank(this_mpi_process(comm))
+            , n_procs(n_mpi_processes(comm))
+            , owning_ranks(owning_ranks)
+          {
+            this->dict.reinit(owned_indices, comm);
+          }
+
+          const IndexSet &           owned_indices;
+          const IndexSet &           indices_to_look_up;
+          const MPI_Comm &           comm;
+          const unsigned int         my_rank;
+          const unsigned int         n_procs;
+          std::vector<unsigned int> &owning_ranks;
+
+          Dictionary dict;
+
+          std::map<unsigned int, std::vector<types::global_dof_index>> temp;
+          std::map<unsigned int, std::vector<unsigned int>> recv_indices;
+
+          virtual void
+          process_request(
+            const unsigned int                          other_rank,
+            const std::vector<types::global_dof_index> &buffer_recv,
+            std::vector<unsigned int> &                 request_buffer) override
+          {
+            (void)other_rank;
+            Assert(buffer_recv.size() % 2 == 0, ExcInternalError());
+            for (unsigned int j = 0; j < buffer_recv.size(); j += 2)
+              for (auto i = buffer_recv[j]; i < buffer_recv[j + 1]; i++)
+                request_buffer.push_back(
+                  dict.actually_owning_ranks[i - dict.local_range.first]);
+          }
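+
+          // Compute the subset of dictionary ranks this process has to ask
+          // for ownership information: indices that fall into the local
+          // dictionary slice are answered directly, all others are grouped
+          // by the rank of their dictionary slice (the sorted iteration
+          // order of the IndexSet keeps each target unique in the list).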
+
+          virtual std::vector<unsigned int>
+          compute_targets() override
+          {
+            std::vector<unsigned int> targets;
+
+            // 1) collect relevant processes and process local dict entries
+            {
+              unsigned int index = 0;
+              for (auto i : indices_to_look_up)
+                {
+                  unsigned int other_rank = dict.dof_to_dict_rank(i);
+                  if (other_rank == my_rank)
+                    owning_ranks[index] =
+                      dict.actually_owning_ranks[i - dict.local_range.first];
+                  else if (targets.empty() || targets.back() != other_rank)
+                    targets.push_back(other_rank);
+                  index++;
+                }
+            }
+
+
+            for (auto i : targets)
+              {
+                recv_indices[i] = {};
+                temp[i]         = {};
+              }
+
+            // 3) collect indices for each process
+            {
+              unsigned int index = 0;
+              for (auto i : indices_to_look_up)
+                {
+                  unsigned int other_rank = dict.dof_to_dict_rank(i);
+                  if (other_rank != my_rank)
+                    {
+                      recv_indices[other_rank].push_back(index);
+                      temp[other_rank].push_back(i);
+                    }
+                  index++;
+                }
+            }
+
+            Assert(targets.size() == recv_indices.size() &&
+                     targets.size() == temp.size(),
+                   ExcMessage("Size does not match!"));
+
+            return targets;
+          }
+
+          virtual void
+          pack_recv_buffer(
+            const int                             other_rank,
+            std::vector<types::global_dof_index> &send_buffer) override
+          {
+            // create index set and compress data to be sent
+            auto &   indices_i = temp[other_rank];
+            IndexSet is(dict.size);
+            is.add_indices(indices_i.begin(), indices_i.end());
+            is.compress();
+
+            for (auto interval = is.begin_intervals();
+                 interval != is.end_intervals();
+                 interval++)
+              {
+                send_buffer.push_back(*interval->begin());
+                send_buffer.push_back(interval->last() + 1);
+              }
+          }
+
+          virtual void
+          prepare_recv_buffer(const int                  other_rank,
+                              std::vector<unsigned int> &recv_buffer) override
+          {
+            recv_buffer.resize(recv_indices[other_rank].size());
+          }
+
+          virtual void
+          unpack_recv_buffer(
+            const int                        other_rank,
+            const std::vector<unsigned int> &recv_buffer) override
+          {
+            Assert(recv_indices[other_rank].size() == recv_buffer.size(),
+                   ExcMessage("Sizes do not match!"));
+
+            for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
+              owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
+          }
+        };
+
+      } // namespace ComputeIndexOwner
+    }   // namespace internal
+  }     // namespace MPI
+} // namespace Utilities
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index ee579acec4..364cd5fb22 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -18,6 +18,7 @@
 #include <deal.II/base/index_set.h>
 #include <deal.II/base/mpi.h>
 #include <deal.II/base/mpi.templates.h>
+#include <deal.II/base/mpi_compute_index_owner_internal.h>
 #include <deal.II/base/multithread_info.h>
 #include <deal.II/base/utilities.h>
 
@@ -1404,323 +1405,6 @@ namespace Utilities
 
 
 
-    namespace ComputeIndexOwner
-    {
-      struct Dictionary
-      {
-        static const unsigned int tag_setup = 11;
-
-        std::vector<unsigned int> actually_owning_ranks;
-
-        types::global_dof_index dofs_per_process;
-        std::pair<types::global_dof_index, types::global_dof_index> local_range;
-        types::global_dof_index local_size;
-        types::global_dof_index size;
-
-        void
-        reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
-        {
-          this->partition(owned_indices, comm);
-
-#ifdef DEAL_II_WITH_MPI
-          unsigned int my_rank = this_mpi_process(comm);
-
-          types::global_dof_index dic_local_rececived = 0;
-          std::map<unsigned int, unsigned int> relevant_procs_map;
-
-          // 2) collect relevant processes and process local dict entries
-          {
-            std::vector<unsigned int> relevant_procs;
-            for (auto i : owned_indices)
-              {
-                unsigned int other_rank = this->dof_to_dict_rank(i);
-                if (other_rank == my_rank)
-                  {
-                    this->actually_owning_ranks[i - this->local_range.first] =
-                      my_rank;
-                    dic_local_rececived++;
-                  }
-                else if (relevant_procs.empty() ||
-                         relevant_procs.back() != other_rank)
-                  relevant_procs.push_back(other_rank);
-              }
-
-            {
-              unsigned int c = 0;
-              for (auto i : relevant_procs)
-                relevant_procs_map[i] = c++;
-            }
-          }
-
-          const unsigned int n_relevant_procs = relevant_procs_map.size();
-          std::vector<std::vector<
-            std::pair<types::global_dof_index, types::global_dof_index>>>
-            buffers(n_relevant_procs);
-          std::vector<MPI_Request> request(n_relevant_procs);
-
-          // 3) send messages with local dofs to the right dict process
-          {
-            std::vector<std::vector<types::global_dof_index>> temp(
-              n_relevant_procs);
-
-            // collect dofs of each dict process
-            for (auto i : owned_indices)
-              {
-                unsigned int other_rank = this->dof_to_dict_rank(i);
-                if (other_rank != my_rank)
-                  temp[relevant_procs_map[other_rank]].push_back(i);
-              }
-
-            // send dofs to each process
-            for (auto rank_pair : relevant_procs_map)
-              {
-                const int rank  = rank_pair.first;
-                const int index = rank_pair.second;
-
-                // create index set and compress data to be sent
-                auto &   indices_i = temp[index];
-                IndexSet is(this->size);
-                is.add_indices(indices_i.begin(), indices_i.end());
-                is.compress();
-
-                // translate index set to a list of pairs
-                auto &buffer = buffers[index];
-                for (auto interval = is.begin_intervals();
-                     interval != is.end_intervals();
-                     interval++)
-                  buffer.emplace_back(*interval->begin(), interval->last() + 1);
-
-                // send data
-                const auto ierr = MPI_Isend(buffer.data(),
-                                            buffer.size() * 2,
-                                            DEAL_II_DOF_INDEX_MPI_TYPE,
-                                            rank,
-                                            tag_setup,
-                                            comm,
-                                            &request[index]);
-                AssertThrowMPI(ierr);
-              }
-          }
-
-
-          // 4) receive messages until all dofs in dict are processed
-          while (this->local_size != dic_local_rececived)
-            {
-              // wait for an incoming message
-              MPI_Status status;
-              auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_setup, comm, &status);
-              AssertThrowMPI(ierr);
-
-              // retrieve size of incoming message
-              int number_amount;
-              ierr = MPI_Get_count(&status,
-                                   DEAL_II_DOF_INDEX_MPI_TYPE,
-                                   &number_amount);
-              AssertThrowMPI(ierr);
-
-              const auto other_rank = status.MPI_SOURCE;
-
-              // receive message
-              Assert(number_amount % 2 == 0, ExcInternalError());
-              std::vector<
-                std::pair<types::global_dof_index, types::global_dof_index>>
-                buffer(number_amount / 2);
-              ierr = MPI_Recv(buffer.data(),
-                              number_amount,
-                              DEAL_II_DOF_INDEX_MPI_TYPE,
-                              other_rank,
-                              tag_setup,
-                              comm,
-                              &status);
-              AssertThrowMPI(ierr);
-
-              // process message: loop over all intervals
-              for (auto interval : buffer)
-                for (types::global_dof_index i = interval.first;
-                     i < interval.second;
-                     i++)
-                  {
-                    this->actually_owning_ranks[i - this->local_range.first] =
-                      other_rank;
-                    dic_local_rececived++;
-                  }
-            }
-
-          // 5) make sure that all messages have been sent
-          const auto ierr =
-            MPI_Waitall(n_relevant_procs, request.data(), MPI_STATUSES_IGNORE);
-          AssertThrowMPI(ierr);
-#else
-          (void)owned_indices;
-          (void)comm;
-#endif
-        }
-
-        unsigned int
-        dof_to_dict_rank(const types::global_dof_index i)
-        {
-          return i / dofs_per_process;
-        }
-
-      private:
-        void
-        partition(const IndexSet &owned_indices, const MPI_Comm &comm)
-        {
-#ifdef DEAL_II_WITH_MPI
-          const unsigned int n_procs = n_mpi_processes(comm);
-          const unsigned int my_rank = this_mpi_process(comm);
-
-          size             = owned_indices.size();
-          dofs_per_process = (size + n_procs - 1) / n_procs;
-          local_range.first = std::min(dofs_per_process * my_rank, size);
-          local_range.second = std::min(dofs_per_process * (my_rank + 1), size);
-          local_size = local_range.second - local_range.first;
-
-          actually_owning_ranks.resize(local_size);
-#else
-          (void)owned_indices;
-          (void)comm;
-#endif
-        }
-      };
-
-      class ConsensusAlgorithmProcess
-        : public dealii::Utilities::MPI::
-            ConsensusAlgorithmProcess<types::global_dof_index, unsigned int>
-      {
-      public:
-        ConsensusAlgorithmProcess(const IndexSet &           owned_indices,
-                                  const IndexSet &           indices_to_look_up,
-                                  const MPI_Comm &           comm,
-                                  std::vector<unsigned int> &owning_ranks)
-          : owned_indices(owned_indices)
-          , indices_to_look_up(indices_to_look_up)
-          , comm(comm)
-          , my_rank(this_mpi_process(comm))
-          , n_procs(n_mpi_processes(comm))
-          , owning_ranks(owning_ranks)
-        {
-          this->dict.reinit(owned_indices, comm);
-        }
-
-        const IndexSet &           owned_indices;
-        const IndexSet &           indices_to_look_up;
-        const MPI_Comm &           comm;
-        const unsigned int         my_rank;
-        const unsigned int         n_procs;
-        std::vector<unsigned int> &owning_ranks;
-
-        Dictionary dict;
-
-        std::map<unsigned int, std::vector<types::global_dof_index>> temp;
-        std::map<unsigned int, std::vector<unsigned int>> recv_indices;
-
-        virtual void
-        process_request(const unsigned int                          other_rank,
-                        const std::vector<types::global_dof_index> &buffer_recv,
-                        std::vector<unsigned int> &request_buffer) override
-        {
-          (void)other_rank;
-          Assert(buffer_recv.size() % 2 == 0, ExcInternalError());
-          for (unsigned int j = 0; j < buffer_recv.size(); j += 2)
-            for (auto i = buffer_recv[j]; i < buffer_recv[j + 1]; i++)
-              request_buffer.push_back(
-                dict.actually_owning_ranks[i - dict.local_range.first]);
-        }
-
-        virtual std::vector<unsigned int>
-        compute_targets() override
-        {
-          std::vector<unsigned int> targets;
-
-          // 1) collect relevant processes and process local dict entries
-          {
-            unsigned int index = 0;
-            for (auto i : indices_to_look_up)
-              {
-                unsigned int other_rank = dict.dof_to_dict_rank(i);
-                if (other_rank == my_rank)
-                  owning_ranks[index] =
-                    dict.actually_owning_ranks[i - dict.local_range.first];
-                else if (targets.empty() || targets.back() != other_rank)
-                  targets.push_back(other_rank);
-                index++;
-              }
-          }
-
-
-          for (auto i : targets)
-            {
-              recv_indices[i] = {};
-              temp[i]         = {};
-            }
-
-          // 3) collect indices for each process
-          {
-            unsigned int index = 0;
-            for (auto i : indices_to_look_up)
-              {
-                unsigned int other_rank = dict.dof_to_dict_rank(i);
-                if (other_rank != my_rank)
-                  {
-                    recv_indices[other_rank].push_back(index);
-                    temp[other_rank].push_back(i);
-                  }
-                index++;
-              }
-          }
-
-          Assert(targets.size() == recv_indices.size() &&
-                   targets.size() == temp.size(),
-                 ExcMessage("Size does not match!"));
-
-          return targets;
-        }
-
-        virtual void
-        pack_recv_buffer(
-          const int                             other_rank,
-          std::vector<types::global_dof_index> &send_buffer) override
-        {
-          // create index set and compress data to be sent
-          auto &   indices_i = temp[other_rank];
-          IndexSet is(dict.size);
-          is.add_indices(indices_i.begin(), indices_i.end());
-          is.compress();
-
-          for (auto interval = is.begin_intervals();
-               interval != is.end_intervals();
-               interval++)
-            {
-              send_buffer.push_back(*interval->begin());
-              send_buffer.push_back(interval->last() + 1);
-            }
-        }
-
-        virtual void
-        prepare_recv_buffer(const int                  other_rank,
-                            std::vector<unsigned int> &recv_buffer) override
-        {
-          recv_buffer.resize(recv_indices[other_rank].size());
-        }
-
-        virtual void
-        unpack_recv_buffer(
-          const int                        other_rank,
-          const std::vector<unsigned int> &recv_buffer) override
-        {
-          Assert(recv_indices[other_rank].size() == recv_buffer.size(),
-                 ExcMessage("Sizes do not match!"));
-
-          for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
-            owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
-        }
-      };
-
-    } // namespace ComputeIndexOwner
-
-
-
   std::vector<unsigned int>
   compute_index_owner(const IndexSet &owned_indices,
                       const IndexSet &indices_to_look_up,
@@ -1740,10 +1424,8 @@ namespace Utilities
       // dictionary, the index set is statically repartitioned among the
       // processes again and extended with information about the actual
      // owner of that index.
-      ComputeIndexOwner::ConsensusAlgorithmProcess process(owned_indices,
-                                                           indices_to_look_up,
-                                                           comm,
-                                                           owning_ranks);
+      internal::ComputeIndexOwner::ConsensusAlgorithmProcess process(
+        owned_indices, indices_to_look_up, comm, owning_ranks);
 
       // Step 2: read dictionary
       // Communicate with the process who owns the index in the static
-- 
2.39.5
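
For reference, the two classes moved into the new header are driven by the
public function Utilities::MPI::compute_index_owner() shown in the last hunk
above. The following sketch is not part of the patch: it is a hypothetical
two-rank driver, assuming compute_index_owner() is declared in
deal.II/base/mpi.h as in this development version and that deal.II was
configured with DEAL_II_WITH_MPI:

    // Hypothetical usage example (not part of the patch).
    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>

    #include <iostream>

    int main(int argc, char **argv)
    {
      using namespace dealii;

      // Initialize MPI through deal.II's RAII wrapper.
      Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
      const unsigned int my_rank =
        Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

      // An arbitrary partition of 8 global indices:
      // rank 0 owns [0,4), rank 1 owns [4,8).
      IndexSet owned(8);
      owned.add_range(4 * my_rank, 4 * my_rank + 4);

      // Each rank asks for one index owned by the other rank.
      IndexSet ghosts(8);
      ghosts.add_index(my_rank == 0 ? 5 : 1);

      // One entry per element of 'ghosts': the rank that owns it.
      const std::vector<unsigned int> owners =
        Utilities::MPI::compute_index_owner(owned, ghosts, MPI_COMM_WORLD);

      std::cout << "rank " << my_rank << ": owner of ghost index is "
                << owners[0] << std::endl;
    }

Run with exactly two MPI ranks, rank 0 should report owner 1 and rank 1 owner
0: with size = 8 and n_procs = 2 the dictionary assigns dofs_per_process = 4,
so indices 0-3 are registered on rank 0 and indices 4-7 on rank 1, and each
query is answered by the dictionary rank that, in this simple setup, also
owns the index.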