From: Peter Munch
Date: Wed, 9 Jun 2021 23:53:53 +0000 (+0200)
Subject: Implement AffineConstraints::make_consistent_in_parallel()
X-Git-Tag: v9.4.0-rc1~1191^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F12430%2Fhead;p=dealii.git

Implement AffineConstraints::make_consistent_in_parallel()
---

diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h
index a59594d59c..2aca35e75b 100644
--- a/include/deal.II/base/mpi_tags.h
+++ b/include/deal.II/base/mpi_tags.h
@@ -149,6 +149,10 @@ namespace Utilities
       // GridTools::internal::distributed_compute_point_locations
       distributed_compute_point_locations,
 
+      // AffineConstraints::make_consistent_in_parallel()
+      affine_constraints_make_consistent_in_parallel_0,
+      affine_constraints_make_consistent_in_parallel_1,
+
     };
   } // namespace Tags
 } // namespace internal
diff --git a/include/deal.II/lac/affine_constraints.h b/include/deal.II/lac/affine_constraints.h
index d8025e47d0..f324dd3930 100644
--- a/include/deal.II/lac/affine_constraints.h
+++ b/include/deal.II/lac/affine_constraints.h
@@ -1711,6 +1711,16 @@ public:
                            const MPI_Comm &mpi_communicator,
                            const bool      verbose = false) const;
 
+  /**
+   * Make the current object consistent on all processors
+   * in a distributed computation. One should call this function before
+   * calling close().
+   */
+  void
+  make_consistent_in_parallel(const IndexSet &locally_owned_dofs,
+                              const IndexSet &locally_relevant_dofs,
+                              const MPI_Comm  mpi_communicator);
+
   /**
    * Exception
    *
diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h
index 5d30fd2e71..4b113e6437 100644
--- a/include/deal.II/lac/affine_constraints.templates.h
+++ b/include/deal.II/lac/affine_constraints.templates.h
@@ -20,6 +20,7 @@
 #include 
 #include 
+#include <deal.II/base/mpi_compute_index_owner_internal.h>
 #include 
 #include 
 
@@ -190,6 +191,361 @@ AffineConstraints<number>::is_consistent_in_parallel(
 }
 
 
+namespace internal
+{
+  template <typename number>
+  std::vector<
+    std::tuple<types::global_dof_index,
+               number,
+               std::vector<std::pair<types::global_dof_index, number>>>>
+  compute_locally_relevant_constraints(
+    const dealii::AffineConstraints<number> &constraints_in,
+    const IndexSet &                         locally_owned_dofs,
+    const IndexSet &                         locally_relevant_dofs,
+    const MPI_Comm                           mpi_communicator)
+  {
+    using ConstraintType =
+      std::tuple<types::global_dof_index,
+                 number,
+                 std::vector<std::pair<types::global_dof_index, number>>>;
+
+    // The result vector, filled step by step.
+    std::vector<ConstraintType> locally_relevant_constraints;
+
+#ifndef DEAL_II_WITH_MPI
+    AssertThrow(false, ExcNotImplemented()); // one should not come here
+    (void)constraints_in;
+    (void)locally_owned_dofs;
+    (void)locally_relevant_dofs;
+    (void)mpi_communicator;
+#else
+
+    const unsigned int my_rank =
+      Utilities::MPI::this_mpi_process(mpi_communicator);
+
+    // helper function: sort the collected constraints by constrained index
+    // and remove duplicates
+    const auto sort_constraints = [&]() {
+      std::sort(locally_relevant_constraints.begin(),
+                locally_relevant_constraints.end(),
+                [](const auto &a, const auto &b) {
+                  return std::get<0>(a) < std::get<0>(b);
+                });
+
+      locally_relevant_constraints.erase(
+        std::unique(locally_relevant_constraints.begin(),
+                    locally_relevant_constraints.end(),
+                    [](const auto &a, const auto &b) {
+                      return std::get<0>(a) == std::get<0>(b);
+                    }),
+        locally_relevant_constraints.end());
+    };
+
+    // step 0: collect the constrained indices of the current object
+    IndexSet constrained_indices(locally_owned_dofs.size());
+
+    std::vector<types::global_dof_index> constrained_indices_temp;
+    for (const auto &line : constraints_in.get_lines())
+      constrained_indices_temp.push_back(line.index);
+
+    constrained_indices.add_indices(constrained_indices_temp.begin(),
+                                    constrained_indices_temp.end());
+
+    // step 1: identify the owners of the constrained indices
+    std::vector<unsigned int> constrained_indices_owners(
+      constrained_indices.n_elements());
+    Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+      constrained_indices_process(locally_owned_dofs,
+                                  constrained_indices,
+                                  mpi_communicator,
+                                  constrained_indices_owners,
+                                  true);
+
+    Utilities::MPI::ConsensusAlgorithms::Selector<
+      std::pair<types::global_dof_index, types::global_dof_index>,
+      unsigned int>(constrained_indices_process, mpi_communicator)
+      .run();
+
+    // step 2: collect all locally owned constraints
+    const auto constrained_indices_by_ranks =
+      constrained_indices_process.get_requesters();
+
+    {
+      const unsigned int tag = Utilities::MPI::internal::Tags::
+        affine_constraints_make_consistent_in_parallel_0;
+
+      // ... collect the data and sort it according to owner
+      std::map<unsigned int, std::vector<ConstraintType>> send_data_temp;
+
+      for (unsigned int i = 0; i < constrained_indices_owners.size(); ++i)
+        {
+          ConstraintType entry;
+
+          const types::global_dof_index index =
+            constrained_indices.nth_index_in_set(i);
+
+          std::get<0>(entry) = index;
+
+          if (constraints_in.is_inhomogeneously_constrained(index))
+            std::get<1>(entry) = constraints_in.get_inhomogeneity(index);
+
+          const auto constraints =
+            constraints_in.get_constraint_entries(index);
+
+          if (constraints)
+            for (const auto &i : *constraints)
+              std::get<2>(entry).push_back(i);
+
+          if (constrained_indices_owners[i] == my_rank)
+            locally_relevant_constraints.push_back(entry);
+          else
+            send_data_temp[constrained_indices_owners[i]].push_back(entry);
+        }
+
+      std::map<unsigned int, std::vector<char>> send_data;
+
+      for (const auto &i : send_data_temp)
+        send_data[i.first] = Utilities::pack(i.second, false);
+
+      std::vector<MPI_Request> requests;
+      requests.reserve(send_data.size());
+
+      // ... send data
+      for (const auto &i : send_data)
+        {
+          if (i.first == my_rank)
+            continue;
+
+          requests.push_back({});
+
+          const int ierr = MPI_Isend(i.second.data(),
+                                     i.second.size(),
+                                     MPI_CHAR,
+                                     i.first,
+                                     tag,
+                                     mpi_communicator,
+                                     &requests.back());
+          AssertThrowMPI(ierr);
+        }
+
+      // ... receive data
+      unsigned int n_rec_ranks = 0;
+
+      for (const auto &i : constrained_indices_by_ranks)
+        if (i.first != my_rank)
+          n_rec_ranks++;
+
+      for (unsigned int i = 0; i < n_rec_ranks; ++i)
+        {
+          MPI_Status status;
+          int        ierr =
+            MPI_Probe(MPI_ANY_SOURCE, tag, mpi_communicator, &status);
+          AssertThrowMPI(ierr);
+
+          int message_length;
+          ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
+          AssertThrowMPI(ierr);
+
+          std::vector<char> buffer(message_length);
+
+          ierr = MPI_Recv(buffer.data(),
+                          buffer.size(),
+                          MPI_CHAR,
+                          status.MPI_SOURCE,
+                          tag,
+                          mpi_communicator,
+                          MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          const auto data =
+            Utilities::unpack<std::vector<ConstraintType>>(buffer, false);
+
+          for (const auto &i : data)
+            locally_relevant_constraints.push_back(i);
+        }
+
+      const int ierr =
+        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+      AssertThrowMPI(ierr);
+
+      sort_constraints();
+    }
+
+    // step 3: communicate the constraints so that each process knows how
+    // its locally relevant dofs are constrained
+    {
+      const unsigned int tag = Utilities::MPI::internal::Tags::
+        affine_constraints_make_consistent_in_parallel_1;
+
+      // ... determine the owners of the locally relevant dofs
+      IndexSet locally_relevant_dofs_non_local = locally_relevant_dofs;
+      locally_relevant_dofs_non_local.subtract_set(locally_owned_dofs);
+
+      std::vector<unsigned int> locally_relevant_dofs_owners(
+        locally_relevant_dofs_non_local.n_elements());
+      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+        locally_relevant_dofs_process(locally_owned_dofs,
+                                      locally_relevant_dofs_non_local,
+                                      mpi_communicator,
+                                      locally_relevant_dofs_owners,
+                                      true);
+
+      Utilities::MPI::ConsensusAlgorithms::Selector<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>(locally_relevant_dofs_process, mpi_communicator)
+        .run();
+
+      const auto locally_relevant_dofs_by_ranks =
+        locally_relevant_dofs_process.get_requesters();
+
+      std::map<unsigned int, std::vector<char>> send_data;
+
+      std::vector<MPI_Request> requests;
+      requests.reserve(locally_relevant_dofs_by_ranks.size());
+
+      // ... send data
+      for (const auto &rank_and_indices : locally_relevant_dofs_by_ranks)
+        {
+          Assert(rank_and_indices.first != my_rank, ExcInternalError());
+
+          std::vector<ConstraintType> data;
+
+          for (const auto index : rank_and_indices.second)
+            {
+              // note: at this stage locally_relevant_constraints still
+              // contains only locally owned constraints
+              const auto ptr =
+                std::find_if(locally_relevant_constraints.begin(),
+                             locally_relevant_constraints.end(),
+                             [index](const auto &a) {
+                               return std::get<0>(a) == index;
+                             });
+              if (ptr != locally_relevant_constraints.end())
+                data.push_back(*ptr);
+            }
+
+          send_data[rank_and_indices.first] = Utilities::pack(data, false);
+
+          requests.push_back({});
+
+          const int ierr =
+            MPI_Isend(send_data[rank_and_indices.first].data(),
+                      send_data[rank_and_indices.first].size(),
+                      MPI_CHAR,
+                      rank_and_indices.first,
+                      tag,
+                      mpi_communicator,
+                      &requests.back());
+          AssertThrowMPI(ierr);
+        }
+
+      // ... receive data
+      const unsigned int n_rec_ranks = [&]() {
+        // count the number of ranks from which data will be received, by
+        // looping over locally_relevant_dofs_owners and collecting the
+        // unique ranks (ignoring the current rank)
+        std::set<unsigned int> rec_ranks;
+
+        for (const unsigned int rank : locally_relevant_dofs_owners)
+          if (rank != my_rank)
+            rec_ranks.insert(rank);
+
+        return rec_ranks.size();
+      }();
+
+      for (unsigned int counter = 0; counter < n_rec_ranks; ++counter)
+        {
+          MPI_Status status;
+          int        ierr =
+            MPI_Probe(MPI_ANY_SOURCE, tag, mpi_communicator, &status);
+          AssertThrowMPI(ierr);
+
+          int message_length;
+          ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
+          AssertThrowMPI(ierr);
+
+          std::vector<char> buffer(message_length);
+
+          ierr = MPI_Recv(buffer.data(),
+                          buffer.size(),
+                          MPI_CHAR,
+                          status.MPI_SOURCE,
+                          tag,
+                          mpi_communicator,
+                          MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          const auto received_locally_relevant_constraints =
+            Utilities::unpack<std::vector<ConstraintType>>(buffer, false);
+
+          for (const auto &locally_relevant_constraint :
+               received_locally_relevant_constraints)
+            locally_relevant_constraints.push_back(locally_relevant_constraint);
+        }
+
+      const int ierr =
+        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+      AssertThrowMPI(ierr);
+
+      sort_constraints();
+    }
+
+#endif
+
+    return locally_relevant_constraints;
+  }
+} // namespace internal
+
+
+
+template <typename number>
+void
+AffineConstraints<number>::make_consistent_in_parallel(
+  const IndexSet &locally_owned_dofs,
+  const IndexSet &locally_relevant_dofs,
+  const MPI_Comm  mpi_communicator)
+{
+  if (Utilities::MPI::job_supports_mpi() == false ||
+      Utilities::MPI::n_mpi_processes(mpi_communicator) == 1)
+    return; // nothing to do, since the computation is serial
+
+  Assert(sorted == false, ExcMatrixIsClosed());
+
+  // 1) get all locally relevant constraints ("temporary constraint matrix")
+  const auto temporary_constraint_matrix =
+    internal::compute_locally_relevant_constraints(*this,
+                                                   locally_owned_dofs,
+                                                   locally_relevant_dofs,
+                                                   mpi_communicator);
+
+  // 2) clear the content of this constraint matrix
+  lines.clear();
+  lines_cache.clear();
+
+  // 3) refill this constraint matrix
+  for (const auto &line : temporary_constraint_matrix)
+    {
+      const types::global_dof_index index = std::get<0>(line);
+
+      // ... line
+      this->add_line(index);
+
+      // ... inhomogeneity
+      if (std::get<1>(line) != number())
+        this->set_inhomogeneity(index, std::get<1>(line));
+
+      // ... entries
+      if (std::get<2>(line).size() > 0)
+        for (const auto &j : std::get<2>(line))
+          this->add_entry(index, j.first, j.second);
+    }
+
+#ifdef DEBUG
+  Assert(this->is_consistent_in_parallel(
+           Utilities::MPI::all_gather(mpi_communicator, locally_owned_dofs),
+           locally_relevant_dofs,
+           mpi_communicator),
+         ExcInternalError());
+#endif
+}
+
+
 template <typename number>
 void
diff --git a/tests/lac/constraints_make_consistent_in_parallel_01.cc b/tests/lac/constraints_make_consistent_in_parallel_01.cc
new file mode 100644
index 0000000000..d92dd2275b
--- /dev/null
+++ b/tests/lac/constraints_make_consistent_in_parallel_01.cc
@@ -0,0 +1,121 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test AffineConstraints::make_consistent_in_parallel().
+
+#include 
+
+#include 
+
+#include 
+#include 
+
+#include 
+
+#include 
+
+#include 
+
+#include "../tests.h"
+
+using namespace dealii;
+
+/**
+ * mpirun -np 4 ./constraints_01
+ *
+ * 6-----7-----8
+ * | 0/2 | 1/3 |
+ * 2-----3-----5
+ * | 0/0 | 0/1 |  ... with dof-index, material-id/rank
+ * 0-----1-----4
+ *
+ * Expected output: {3}, {3, 5}, {3, 7}, {3, 5, 7}
+ * Actual output:   {},  {3, 5}, {3, 7}, {3, 5, 7}
+ *
+ * ... code to reproduce https://github.com/dealii/dealii/issues/11725
+ */
+template <typename number>
+IndexSet
+collect_lines(const AffineConstraints<number> &constraints,
+              const unsigned int               size)
+{
+  IndexSet lines_local(size);
+  for (const auto &line : constraints.get_lines())
+    lines_local.add_index(line.index);
+  return lines_local;
+}
+
+template <int dim>
+void
+test(const DoFHandler<dim> &dof_handler,
+     const IndexSet &       locally_relevant_dofs)
+{
+  AffineConstraints<double> constraints;
+
+  std::vector<types::global_dof_index> dof_indices(
+    dof_handler.get_fe().n_dofs_per_face());
+
+  // constrain all dofs on faces between cells of different material id
+  for (const auto &cell : dof_handler.active_cell_iterators())
+    for (const auto face : cell->face_indices())
+      if (cell->is_locally_owned() && !cell->at_boundary(face) &&
+          cell->material_id() != cell->neighbor(face)->material_id())
+        {
+          cell->face(face)->get_dof_indices(dof_indices);
+          for (const auto i : dof_indices)
+            constraints.add_line(i);
+        }
+
+  const auto a = collect_lines(constraints, dof_handler.n_dofs());
+  a.print(deallog.get_file_stream());
+
+  constraints.make_consistent_in_parallel(dof_handler.locally_owned_dofs(),
+                                          locally_relevant_dofs,
+                                          dof_handler.get_communicator());
+
+  const auto b = collect_lines(constraints, dof_handler.n_dofs());
+  b.print(deallog.get_file_stream());
+}
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  const int dim = 2;
+
+  ConditionalOStream pcout(std::cout,
+                           Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) ==
+                             0);
+
+  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);
+  GridGenerator::subdivided_hyper_cube(tria, 2);
+
+  DoFHandler<dim> dof_handler(tria);
+  dof_handler.distribute_dofs(FE_Q<dim>(1));
+
+  for (const auto &cell : tria.active_cell_iterators())
+    if (cell->center()[0] > 0.5 && cell->center()[1] > 0.5)
+      cell->set_material_id(1);
+
+  IndexSet locally_active_dofs;
+  DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
+  test(dof_handler, locally_active_dofs);
+
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+  test(dof_handler, locally_relevant_dofs);
+}
diff --git a/tests/lac/constraints_make_consistent_in_parallel_01.with_p4est=true.mpirun=4.output b/tests/lac/constraints_make_consistent_in_parallel_01.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..e1392e6b58
--- /dev/null
+++ b/tests/lac/constraints_make_consistent_in_parallel_01.with_p4est=true.mpirun=4.output
@@ -0,0 +1,23 @@
+
+{}
+{3}
+{}
+{3, 5, 7}
+
+{3, 5}
+{3, 5}
+{3, 5}
+{3, 5, 7}
+
+
+{3, 7}
+{3, 7}
+{3, 7}
+{3, 5, 7}
+
+
+{3, 5, 7}
+{3, 5, 7}
+{3, 5, 7}
+{3, 5, 7}
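
A minimal usage sketch of the new API follows. It is not part of the commit: the helper name setup_constraints, the use of hanging-node constraints as the example constraint source, and the scalar type double are illustrative assumptions; only the call sequence (fill constraints from local information, call make_consistent_in_parallel() before close()) follows the declaration added to affine_constraints.h and the test above.

// Usage sketch (illustrative, not part of the commit): make locally built
// constraints consistent across all MPI ranks before closing them.
#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/affine_constraints.h>

template <int dim>
void
setup_constraints(const dealii::DoFHandler<dim> &dof_handler) // hypothetical helper
{
  using namespace dealii;

  IndexSet locally_relevant_dofs;
  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

  // Constraints are first filled from local information only ...
  AffineConstraints<double> constraints(locally_relevant_dofs);
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);

  // ... and then exchanged so that every process stores the same data for
  // all of its locally relevant DoFs; this has to happen before close().
  constraints.make_consistent_in_parallel(dof_handler.locally_owned_dofs(),
                                          locally_relevant_dofs,
                                          dof_handler.get_communicator());
  constraints.close();
}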