From 3fa1f378fcfbe762f506e5501556b442ea4cbffc Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Sat, 21 Mar 2020 18:27:53 +0100
Subject: [PATCH] Use MPI in tests

---
 include/deal.II/lac/la_sm_partitioner.h       | 207 +++++++++++++++++-
 include/deal.II/lac/la_sm_vector.templates.h  |   3 +-
 include/deal.II/matrix_free/matrix_free.h     |  10 +-
 tests/sm/vector_sm_01.cc                      |  14 +-
 ...01.output => vector_sm_01.mpirun=1.output} |   0
 5 files changed, 219 insertions(+), 15 deletions(-)
 rename tests/sm/{vector_sm_01.output => vector_sm_01.mpirun=1.output} (100%)

diff --git a/include/deal.II/lac/la_sm_partitioner.h b/include/deal.II/lac/la_sm_partitioner.h
index 5245b3198f..02cc66481c 100644
--- a/include/deal.II/lac/la_sm_partitioner.h
+++ b/include/deal.II/lac/la_sm_partitioner.h
@@ -18,6 +18,7 @@
 
 #include 
 
+#include 
 #include 
 
 
@@ -32,10 +33,12 @@ namespace LinearAlgebra
     class Partitioner : public LinearAlgebra::CommunicationPatternBase
     {
     public:
-      Partitioner(const MPI_Comm &comm, const MPI_Comm &comm_sm)
+      Partitioner(const MPI_Comm &comm, const MPI_Comm &comm_sm, const IndexSet &is_locally_owned, const IndexSet &is_locally_ghost)
         : comm(comm)
         , comm_sm(comm_sm)
-      {}
+      {
+        reinit(is_locally_owned, is_locally_ghost);
+      }
 
       const MPI_Comm &
       get_mpi_communicator() const override
@@ -44,18 +47,210 @@ namespace LinearAlgebra
       }
 
       void
-      reinit(const IndexSet &indexset_has,
-             const IndexSet &indexset_want,
+      reinit(const IndexSet &is_locally_owned,
+             const IndexSet &is_locally_ghost,
              const MPI_Comm &communicator) override
       {
-        (void)indexset_has;
-        (void)indexset_want;
+        Assert(false, ExcNotImplemented());
+        (void)is_locally_owned;
+        (void)is_locally_ghost;
         (void)communicator;
       }
 
+      void
+      reinit(const IndexSet &is_locally_owned,
+             const IndexSet &is_locally_ghost)
+      {
+        this->n_local_elements = is_locally_owned.n_elements();
+
+        std::vector<unsigned int> sm_ranks(
+          Utilities::MPI::n_mpi_processes(comm_sm));
+
+        const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+
+        MPI_Allgather(
+          &rank, 1, MPI_UNSIGNED, sm_ranks.data(), 1, MPI_UNSIGNED, comm_sm);
+
+        std::vector<unsigned int> owning_ranks_of_ghosts(
+          is_locally_ghost.n_elements());
+
+        Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+          process(is_locally_owned,
+                  is_locally_ghost,
+                  comm,
+                  owning_ranks_of_ghosts,
+                  true);
+
+        Utilities::MPI::ConsensusAlgorithms::Selector<
+          std::pair<types::global_dof_index, types::global_dof_index>,
+          unsigned int>
+          consensus_algorithm(process, comm);
+        consensus_algorithm.run();
+
+        {
+          std::map<unsigned int, std::vector<unsigned int>>
+            rank_to_local_indices;
+
+          for (unsigned int i = 0; i < owning_ranks_of_ghosts.size(); i++)
+            rank_to_local_indices[owning_ranks_of_ghosts[i]].push_back(i);
+
+          unsigned int offset = 0;
+
+
+          for (const auto &rank_and_local_indices : rank_to_local_indices)
+            {
+              const auto ptr = std::find(sm_ranks.begin(),
+                                         sm_ranks.end(),
+                                         rank_and_local_indices.first);
+
+              if (ptr == sm_ranks.end())
+                {
+                  // remote process
+                  recv_remote_ranks.push_back(rank_and_local_indices.first);
+                  recv_remote_ptr.push_back(
+                    recv_remote_ptr.back() +
+                    rank_and_local_indices.second.size());
+                }
+              else
+                {
+                  // shared process
+                  recv_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
+                  recv_sm_ptr.push_back(recv_sm_ptr.back() +
+                                        rank_and_local_indices.second.size());
+                  recv_sm_offset.push_back(is_locally_owned.n_elements() +
+                                           offset);
+                }
+              offset += rank_and_local_indices.second.size();
+            }
+          recv_remote_req.resize(recv_remote_ranks.size());
+          recv_sm_req.resize(recv_sm_ranks.size());
+
+          recv_sm_indices.resize(recv_sm_ptr.back());
+        }
+
+        {
+          const auto rank_to_global_indices =
+            process.get_requesters();
+
+          for (const auto &rank_and_global_indices : rank_to_global_indices)
+            {
+              const auto ptr = std::find(sm_ranks.begin(),
+                                         sm_ranks.end(),
+                                         rank_and_global_indices.first);
+
+              if (ptr == sm_ranks.end())
+                {
+                  // remote process
+                  send_remote_ranks.push_back(rank_and_global_indices.first);
+
+                  for (const auto &i : rank_and_global_indices.second)
+                    send_remote_indices.push_back(
+                      is_locally_owned.index_within_set(i));
+
+                  send_remote_ptr.push_back(send_remote_indices.size());
+                }
+              else
+                {
+                  // shared process
+                  send_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
+
+                  for (const auto &i : rank_and_global_indices.second)
+                    send_sm_indices.push_back(
+                      is_locally_owned.index_within_set(i));
+
+                  send_sm_ptr.push_back(send_sm_indices.size());
+                }
+            }
+          send_remote_req.resize(send_remote_ranks.size());
+          send_sm_req.resize(send_sm_ranks.size());
+        }
+
+        {
+          for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+            MPI_Isend(send_sm_indices.data() + send_sm_ptr[i],
+                      send_sm_ptr[i + 1] - send_sm_ptr[i],
+                      MPI_UNSIGNED,
+                      send_sm_ranks[i],
+                      2,
+                      comm_sm,
+                      send_sm_req.data() + i);
+
+          for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+            MPI_Irecv(recv_sm_indices.data() + recv_sm_ptr[i],
+                      recv_sm_ptr[i + 1] - recv_sm_ptr[i],
+                      MPI_UNSIGNED,
+                      recv_sm_ranks[i],
+                      2,
+                      comm_sm,
+                      recv_sm_req.data() + i);
+
+          MPI_Waitall(recv_sm_req.size(),
+                      recv_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+          MPI_Waitall(send_sm_req.size(),
+                      send_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+        }
+
+        {
+          send_sm_offset.resize(send_sm_ranks.size());
+
+          for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+            MPI_Irecv(send_sm_offset.data() + i,
+                      1,
+                      MPI_UNSIGNED,
+                      send_sm_ranks[i],
+                      3,
+                      comm_sm,
+                      send_sm_req.data() + i);
+
+          for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+            MPI_Isend(recv_sm_offset.data() + i,
+                      1,
+                      MPI_UNSIGNED,
+                      recv_sm_ranks[i],
+                      3,
+                      comm_sm,
+                      recv_sm_req.data() + i);
+
+          MPI_Waitall(recv_sm_req.size(),
+                      recv_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+          MPI_Waitall(send_sm_req.size(),
+                      send_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+        }
+
+        buffer.resize(send_remote_ptr.back());
+      }
+
     private:
       const MPI_Comm &comm;
       const MPI_Comm &comm_sm;
+
+      unsigned int n_local_elements;
+
+      AlignedVector<Number> buffer;
+
+      std::vector<unsigned int> recv_remote_ranks;
+      std::vector<unsigned int> recv_remote_ptr = {0};
+      std::vector<MPI_Request>  recv_remote_req;
+
+      std::vector<unsigned int> recv_sm_ranks;
+      std::vector<unsigned int> recv_sm_ptr = {0};
+      std::vector<MPI_Request>  recv_sm_req;
+      std::vector<unsigned int> recv_sm_indices;
+      std::vector<unsigned int> recv_sm_offset;
+
+      std::vector<unsigned int> send_remote_ranks;
+      std::vector<unsigned int> send_remote_ptr = {0};
+      std::vector<unsigned int> send_remote_indices;
+      std::vector<MPI_Request>  send_remote_req;
+
+      std::vector<unsigned int> send_sm_ranks;
+      std::vector<unsigned int> send_sm_ptr = {0};
+      std::vector<unsigned int> send_sm_indices;
+      std::vector<MPI_Request>  send_sm_req;
+      std::vector<unsigned int> send_sm_offset;
     };
 
   } // end of namespace SharedMPI
diff --git a/include/deal.II/lac/la_sm_vector.templates.h b/include/deal.II/lac/la_sm_vector.templates.h
index 2e46004f13..a55fd3d6af 100644
--- a/include/deal.II/lac/la_sm_vector.templates.h
+++ b/include/deal.II/lac/la_sm_vector.templates.h
@@ -97,7 +97,8 @@ namespace LinearAlgebra
     void
     Vector::clear_mpi_requests()
     {
-      Assert(false, ExcNotImplemented());
+      // TODO: what is the use of this?
+      // it should probably be delegated to the partitioner
     }
 
 
diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h
index a3d9078ea1..d8841bad93 100644
--- a/include/deal.II/matrix_free/matrix_free.h
+++ b/include/deal.II/matrix_free/matrix_free.h
@@ -1482,7 +1482,7 @@ public:
   template 
   void
   initialize_dof_vector(LinearAlgebra::SharedMPI::Vector &vec,
-                        const MPI_Comm     comm_sm,
+                        const MPI_Comm &   comm_sm,
                         const unsigned int dof_handler_index = 0) const;
 
   /**
@@ -2231,17 +2231,19 @@ template 
 inline void
 MatrixFree::initialize_dof_vector(
   LinearAlgebra::SharedMPI::Vector &vec,
-  const MPI_Comm     comm_sm,
+  const MPI_Comm &   comm_sm,
   const unsigned int comp) const
 {
   AssertIndexRange(comp, n_components());
 
+  const auto &part = dof_info[comp].vector_partitioner;
+
   if (partitioner_sm[comp][comm_sm] == nullptr)
     partitioner_sm[comp][comm_sm] =
      std::make_shared<LinearAlgebra::SharedMPI::Partitioner<Number>>(
-        dof_info[comp].vector_partitioner->get_communicator(), comm_sm);
+        part->get_communicator(), comm_sm, part->locally_owned_range(), part->ghost_indices());
 
-  vec.reinit(dof_info[comp].vector_partitioner, partitioner_sm[comp][comm_sm]);
+  vec.reinit(part, partitioner_sm[comp][comm_sm]);
 }
 
 
diff --git a/tests/sm/vector_sm_01.cc b/tests/sm/vector_sm_01.cc
index 17d6097391..1506d1064c 100644
--- a/tests/sm/vector_sm_01.cc
+++ b/tests/sm/vector_sm_01.cc
@@ -36,7 +36,7 @@ using namespace dealii;
 
 template 
 void
-test(const int n_refinements, const int degree)
+test(const int n_refinements, const int degree, const int group_size)
 {
   Triangulation tria;
   GridGenerator::hyper_cube(tria);
@@ -60,16 +60,22 @@ test(const int n_refinements, const int degree)
   dealii::MatrixFree matrix_free;
   matrix_free.reinit(mapping, dof_handler, constraint, quad, additional_data);
 
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+  const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+  MPI_Comm comm_sm;
+  MPI_Comm_split(comm, rank / group_size, rank, &comm_sm);
 
   LinearAlgebra::SharedMPI::Vector vec;
   matrix_free.initialize_dof_vector(vec, comm_sm);
 }
 
 
 int
-main()
+main(int argc, char *argv[])
 {
-  initlog();
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
 
-  test(1, 1);
+  test(1, 1, 1);
 }
\ No newline at end of file
diff --git a/tests/sm/vector_sm_01.output b/tests/sm/vector_sm_01.mpirun=1.output
similarity index 100%
rename from tests/sm/vector_sm_01.output
rename to tests/sm/vector_sm_01.mpirun=1.output
-- 
2.39.5
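For context on how the test builds its shared-memory communicator: below is a minimal, self-contained sketch that assumes nothing beyond a working MPI installation. The variable names comm_sm_by_group and comm_sm_by_node and the group_size value of 2 are illustrative only and do not appear in the patch.

#include <mpi.h>

#include <cstdio>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);

  MPI_Comm comm = MPI_COMM_WORLD;

  int rank = 0;
  MPI_Comm_rank(comm, &rank);

  // Variant used in vector_sm_01.cc: ranks 0..group_size-1 form one
  // sub-communicator, the next group_size ranks the next one, and so on.
  // The value 2 is an arbitrary choice for illustration.
  const int group_size = 2;
  MPI_Comm  comm_sm_by_group;
  MPI_Comm_split(comm, rank / group_size, rank, &comm_sm_by_group);

  // Common alternative: let MPI group exactly the ranks that can share
  // memory, i.e. the ranks placed on the same compute node.
  MPI_Comm comm_sm_by_node;
  MPI_Comm_split_type(
    comm, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &comm_sm_by_node);

  int sm_rank = 0, sm_size = 0;
  MPI_Comm_rank(comm_sm_by_node, &sm_rank);
  MPI_Comm_size(comm_sm_by_node, &sm_size);
  std::printf("global rank %d -> node-local rank %d of %d\n",
              rank,
              sm_rank,
              sm_size);

  MPI_Comm_free(&comm_sm_by_group);
  MPI_Comm_free(&comm_sm_by_node);
  MPI_Finalize();
  return 0;
}

With group_size equal to 1, as in the call test(1, 1, 1) above, every rank receives its own color in MPI_Comm_split, so each sub-communicator contains exactly one process and the shared-memory code path is exercised without any data actually being shared between processes.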