From: Peter Munch
Date: Tue, 1 Jun 2021 08:27:04 +0000 (+0200)
Subject: TriangulationDescription::Utilities::create_description_from_triangulation(): allow...
X-Git-Tag: v9.4.0-rc1~1287^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f2157ce0b2bd97f1f7c385bfa8887eb8599e2924;p=dealii.git

TriangulationDescription::Utilities::create_description_from_triangulation(): allow subcommunicator in pdt
---

diff --git a/include/deal.II/grid/tria_description.h b/include/deal.II/grid/tria_description.h
index 2d1ad8f1f5..02644277ed 100644
--- a/include/deal.II/grid/tria_description.h
+++ b/include/deal.II/grid/tria_description.h
@@ -455,7 +455,12 @@ namespace TriangulationDescription
    * CellAccessor::global_active_cell_index()). This function allows one to
    * repartition distributed Triangulation objects.
    *
-   * @note The communicator is extracted from the Triangulation @p tria.
+   * @note The communicator is extracted from the vector @p partition.
+   *
+   * @note The triangulation @p tria can be set up on a subcommunicator of the
+   * communicator of @p partition. All processes that are not part of that
+   * subcommunicator need to set up the local triangulation with the
+   * special-purpose communicator MPI_COMM_NULL.
    *
    * @note The multigrid levels are currently not constructed, since
    * @p partition only describes the partitioning of the active level.
diff --git a/source/grid/tria_description.cc b/source/grid/tria_description.cc
index 87dabb0026..6a86c6ecfb 100644
--- a/source/grid/tria_description.cc
+++ b/source/grid/tria_description.cc
@@ -745,6 +745,11 @@ namespace TriangulationDescription
       const Triangulation<dim, spacedim> &              tria,
       const LinearAlgebra::distributed::Vector<double> &partition)
     {
+#ifdef DEAL_II_WITH_MPI
+      if (tria.get_communicator() == MPI_COMM_NULL)
+        AssertDimension(partition.local_size(), 0);
+#endif
+
       // 1) determine processes owning locally owned cells
       const std::vector<unsigned int> relevant_processes = [&]() {
         std::set<unsigned int> relevant_processes;
@@ -893,14 +898,14 @@ namespace TriangulationDescription
         });
 
       dealii::Utilities::MPI::ConsensusAlgorithms::Selector<char, char>(
-        process, tria.get_communicator())
+        process, partition.get_mpi_communicator())
         .run();
 
       // remove redundant entries
       description_merged.reduce();
 
       // convert to actual description
-      return description_merged.convert(tria.get_communicator(),
+      return description_merged.convert(partition.get_mpi_communicator(),
                                         tria.get_mesh_smoothing());
     }
diff --git a/tests/fullydistributed_grids/repartitioning_05.cc b/tests/fullydistributed_grids/repartitioning_05.cc
new file mode 100644
index 0000000000..d1a701a918
--- /dev/null
+++ b/tests/fullydistributed_grids/repartitioning_05.cc
@@ -0,0 +1,160 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test
+// TriangulationDescription::Utilities::create_description_from_triangulation()
+// so that it also works for p:d:T set up on a subcommunicator.
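+//
+// Outline of the test below:
+// - create_sub_comm() splits the given communicator so that only the
+//   first n ranks form a subcommunicator; all other ranks get
+//   MPI_COMM_NULL
+// - a p:d:T is set up on the subcommunicator only
+// - partition_distributed_triangulation() builds a partition vector on
+//   the full communicator; ranks outside the subcommunicator contribute
+//   an empty locally owned range
+// - create_description_from_triangulation() consumes that vector and a
+//   p:f:T is created on the full communicator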
+
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/distributed/fully_distributed_tria.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_description.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include "./tests.h"
+
+using namespace dealii;
+
+MPI_Comm
+create_sub_comm(const MPI_Comm &comm, const unsigned int size)
+{
+  const auto rank = Utilities::MPI::this_mpi_process(comm);
+
+  int color = rank < size;
+
+  MPI_Comm sub_comm;
+  MPI_Comm_split(comm, color, rank, &sub_comm);
+
+  if (rank < size)
+    return sub_comm;
+  else
+    {
+      MPI_Comm_free(&sub_comm);
+      return MPI_COMM_NULL;
+    }
+}
+
+
+
+template <int dim, int spacedim>
+LinearAlgebra::distributed::Vector<double>
+partition_distributed_triangulation(
+  const Triangulation<dim, spacedim> &tria_in,
+  const MPI_Comm &                    comm)
+{
+  const auto comm_tria = tria_in.get_communicator();
+
+  const auto n_global_active_cells = Utilities::MPI::max(
+    comm_tria == MPI_COMM_NULL ? 0 : tria_in.n_global_active_cells(), comm);
+
+  if (comm_tria == MPI_COMM_NULL)
+    {
+      // this process is not part of the subcommunicator: return an empty
+      // partition vector defined on the full communicator
+      LinearAlgebra::distributed::Vector<double> partition{
+        IndexSet(n_global_active_cells), IndexSet(n_global_active_cells), comm};
+      partition.update_ghost_values();
+      return partition;
+    }
+
+  const auto tria =
+    dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim> *>(
+      &tria_in);
+
+  Assert(tria, ExcNotImplemented());
+
+  LinearAlgebra::distributed::Vector<double> partition(
+    tria->global_active_cell_index_partitioner().lock()->locally_owned_range(),
+    tria->global_active_cell_index_partitioner().lock()->ghost_indices(),
+    comm);
+
+  const unsigned int n_partitions = Utilities::MPI::n_mpi_processes(comm);
+
+  // assign each locally owned cell to a partition, distributing the global
+  // cell range evenly among the processes of the full communicator
+  for (const auto &cell : tria_in.active_cell_iterators())
+    if (cell->is_locally_owned())
+      partition[cell->global_active_cell_index()] =
+        cell->global_active_cell_index() * n_partitions /
+        tria_in.n_global_active_cells();
+
+  partition.update_ghost_values();
+
+  return partition;
+}
+
+
+template <int dim>
+void
+test(const MPI_Comm comm, const unsigned int n_partitions)
+{
+  auto sub_comm = create_sub_comm(comm, n_partitions);
+
+  parallel::distributed::Triangulation<dim> tria(sub_comm);
+
+  if (sub_comm != MPI_COMM_NULL)
+    {
+      GridGenerator::subdivided_hyper_cube(tria, 4);
+      tria.refine_global(3);
+    }
+
+  const auto partition_new = partition_distributed_triangulation(tria, comm);
+  partition_new.update_ghost_values();
+
+  // repartition triangulation
+  const auto construction_data =
+    TriangulationDescription::Utilities::create_description_from_triangulation(
+      tria, partition_new);
+
+  parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
+  tria_pft.create_triangulation(construction_data);
+
+  FE_Q<dim>       fe(2);
+  DoFHandler<dim> dof_handler(tria_pft);
+  dof_handler.distribute_dofs(fe);
+
+  // print statistics
+  print_statistics(tria_pft);
+  print_statistics(dof_handler);
+
+  if (sub_comm != MPI_COMM_NULL)
+    MPI_Comm_free(&sub_comm);
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+  deallog.push("all");
+  test<2>(comm, Utilities::MPI::n_mpi_processes(comm));
+  deallog.pop();
+
+  // test that we can eliminate processes
+  deallog.push("reduced");
+  test<2>(comm,
+          std::max<unsigned int>(1,
+                                 Utilities::MPI::n_mpi_processes(comm) / 2));
+  deallog.pop();
+}
diff --git a/tests/fullydistributed_grids/repartitioning_05.mpirun=4.output b/tests/fullydistributed_grids/repartitioning_05.mpirun=4.output
new file mode 100644
index 0000000000..971927a38e
--- /dev/null
+++ b/tests/fullydistributed_grids/repartitioning_05.mpirun=4.output
@@ -0,0 +1,63 @@
+
+DEAL:0:all::n_levels: 4
+DEAL:0:all::n_cells: 469
+DEAL:0:all::n_active_cells: 354
+DEAL:0:all::
+DEAL:0:all::n_dofs: 4225
+DEAL:0:all::n_locally_owned_dofs: 1089
+DEAL:0:all::
+DEAL:0:reduced::n_levels: 4
+DEAL:0:reduced::n_cells: 456
+DEAL:0:reduced::n_active_cells: 344
+DEAL:0:reduced::
+DEAL:0:reduced::n_dofs: 4225
+DEAL:0:reduced::n_locally_owned_dofs: 1105
+DEAL:0:reduced::
+
+DEAL:1:all::n_levels: 4
+DEAL:1:all::n_cells: 469
+DEAL:1:all::n_active_cells: 354
+DEAL:1:all::
+DEAL:1:all::n_dofs: 4225
+DEAL:1:all::n_locally_owned_dofs: 1056
+DEAL:1:all::
+DEAL:1:reduced::n_levels: 4
+DEAL:1:reduced::n_cells: 572
+DEAL:1:reduced::n_active_cells: 432
+DEAL:1:reduced::
+DEAL:1:reduced::n_dofs: 4225
+DEAL:1:reduced::n_locally_owned_dofs: 1040
+DEAL:1:reduced::
+
+
+DEAL:2:all::n_levels: 4
+DEAL:2:all::n_cells: 469
+DEAL:2:all::n_active_cells: 354
+DEAL:2:all::
+DEAL:2:all::n_dofs: 4225
+DEAL:2:all::n_locally_owned_dofs: 1056
+DEAL:2:all::
+DEAL:2:reduced::n_levels: 4
+DEAL:2:reduced::n_cells: 572
+DEAL:2:reduced::n_active_cells: 432
+DEAL:2:reduced::
+DEAL:2:reduced::n_dofs: 4225
+DEAL:2:reduced::n_locally_owned_dofs: 1040
+DEAL:2:reduced::
+
+
+DEAL:3:all::n_levels: 4
+DEAL:3:all::n_cells: 469
+DEAL:3:all::n_active_cells: 354
+DEAL:3:all::
+DEAL:3:all::n_dofs: 4225
+DEAL:3:all::n_locally_owned_dofs: 1024
+DEAL:3:all::
+DEAL:3:reduced::n_levels: 4
+DEAL:3:reduced::n_cells: 456
+DEAL:3:reduced::n_active_cells: 344
+DEAL:3:reduced::
+DEAL:3:reduced::n_dofs: 4225
+DEAL:3:reduced::n_locally_owned_dofs: 1040
+DEAL:3:reduced::