From: Peter Munch Date: Thu, 24 Mar 2022 16:47:11 +0000 (+0100) Subject: Add functions to MGTools X-Git-Tag: v9.4.0-rc1~312^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8f8338f8adbb3b38c21b9d10edcd392ca42b5f85;p=dealii.git Add functions to MGTools --- diff --git a/include/deal.II/multigrid/mg_tools.h b/include/deal.II/multigrid/mg_tools.h index 1b0b2d7f05..fb3b22c7d0 100644 --- a/include/deal.II/multigrid/mg_tools.h +++ b/include/deal.II/multigrid/mg_tools.h @@ -283,15 +283,43 @@ namespace MGTools unsigned int max_level_for_coarse_mesh(const Triangulation &tria); + /** + * This function returns the local work of each level, i.e., the + * number of locally-owned cells on each multigrid level. + * + * @note This function requires that + * parallel::TriangulationBase::is_multilevel_hierarchy_constructed() + * is true, which can be controlled by setting the + * construct_multigrid_hierarchy flag when constructing the + * Triangulation. + */ + template + std::vector + local_workload(const Triangulation &tria); + + /** + * Similar to the above function but for a vector of triangulations as created + * e.g. by + * MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(). + * This returns the number of locally-owned cells on each of the + * triangulations. + */ + template + std::vector + local_workload( + const std::vector>> + &trias); + /** * Return the imbalance of the parallel distribution of the multigrid - * mesh hierarchy. Ideally this value is equal to 1 (every processor owns + * mesh hierarchy, based on the local workload provided by local_workload(). + * Ideally this value is equal to 1 (every processor owns * the same number of cells on each level, approximately true for most * globally refined meshes). Values greater than 1 estimate the slowdown * one should see in a geometric multigrid v-cycle as compared with the same * computation on a perfectly distributed mesh hierarchy. * - * This function is a collective MPI call between all ranks of the + * @note This function is a collective MPI call between all ranks of the * Triangulation and therefore needs to be called from all ranks. * * @note This function requires that @@ -304,6 +332,69 @@ namespace MGTools double workload_imbalance(const Triangulation &tria); + /** + * Similar to the above function but for a vector of triangulations as created + * e.g. by + * MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(). + */ + template + double + workload_imbalance( + const std::vector>> + &trias); + + /** + * Return the vertical communication cost between levels. + * The returned vector contains for each level the number + * of cells that have the same owning process as their + * corresponding coarse cell (parent) and the number of cells that have not + * the same owning process as their corresponding coarse cell. + */ + template + std::vector> + local_vertical_communication_cost(const Triangulation &tria); + + /** + * Similar to the above function but for a vector of triangulations as created + * e.g. by + * MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(). + * + * @note This function is a collective MPI call between all ranks of the + * Triangulation and therefore needs to be called from all ranks. + */ + template + std::vector> + local_vertical_communication_cost( + const std::vector>> + &trias); + + /** + * Share of fine cells that have the same owning process as their + * corresponding coarse cell (parent). 
This quantity gives an + * indication on the efficiency of a multigrid transfer operator + * and on how much data has to be sent around. A small number indicates + * that most of the data has to be completely permuted, involving a large + * volume of communication. + * + * @note This function is a collective MPI call between all ranks of the + * Triangulation and therefore needs to be called from all ranks. + */ + template + double + vertical_communication_efficiency(const Triangulation &tria); + + /** + * Similar to the above function but for a vector of triangulations as created + * e.g. by + * MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(). + */ + template + double + vertical_communication_efficiency( + const std::vector>> + &trias); + + } // namespace MGTools /* @} */ diff --git a/source/multigrid/mg_tools.cc b/source/multigrid/mg_tools.cc index 706f3b24cd..7b2bc36083 100644 --- a/source/multigrid/mg_tools.cc +++ b/source/multigrid/mg_tools.cc @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -24,6 +25,7 @@ #include #include +#include #include #include @@ -1596,61 +1598,269 @@ namespace MGTools - template - double - workload_imbalance(const Triangulation &tria) + namespace internal { - double workload_imbalance = 1.0; + double + workload_imbalance( + const std::vector &n_cells_on_levels, + const MPI_Comm comm) + { + std::vector n_cells_on_leves_max( + n_cells_on_levels.size()); + std::vector n_cells_on_leves_sum( + n_cells_on_levels.size()); + + Utilities::MPI::max(n_cells_on_levels, comm, n_cells_on_leves_max); + Utilities::MPI::sum(n_cells_on_levels, comm, n_cells_on_leves_sum); + + const unsigned int n_proc = Utilities::MPI::n_mpi_processes(comm); + + const double ideal_work = std::accumulate(n_cells_on_leves_sum.begin(), + n_cells_on_leves_sum.end(), + 0) / + static_cast(n_proc); + const double workload_imbalance = + std::accumulate(n_cells_on_leves_max.begin(), + n_cells_on_leves_max.end(), + 0) / + ideal_work; + + return workload_imbalance; + } + + + + double + vertical_communication_efficiency( + const std::vector< + std::pair> &cells, + const MPI_Comm comm) + { + std::vector cells_local(cells.size()); + std::vector cells_remote(cells.size()); + + for (unsigned int i = 0; i < cells.size(); ++i) + { + cells_local[i] = cells[i].first; + cells_remote[i] = cells[i].second; + } + + std::vector cells_local_sum(cells_local.size()); + Utilities::MPI::sum(cells_local, comm, cells_local_sum); + + std::vector cells_remote_sum( + cells_remote.size()); + Utilities::MPI::sum(cells_remote, comm, cells_remote_sum); + + const auto n_cells_local = + std::accumulate(cells_local_sum.begin(), cells_local_sum.end(), 0); + const auto n_cells_remote = + std::accumulate(cells_remote_sum.begin(), cells_remote_sum.end(), 0); + + return static_cast(n_cells_local) / + (n_cells_local + n_cells_remote); + } + } // namespace internal + - // It is only necessary to calculate the imbalance - // on a distributed mesh. The imbalance is always - // 1.0 for the serial case. 
+ + template + std::vector + local_workload(const Triangulation &tria) + { if (const parallel::TriangulationBase *tr = dynamic_cast *>( &tria)) - { - Assert( - tr->is_multilevel_hierarchy_constructed(), - ExcMessage( - "We can only compute the workload imbalance if the multilevel hierarchy has been constructed!")); + Assert( + tr->is_multilevel_hierarchy_constructed(), + ExcMessage( + "We can only compute the workload imbalance if the multilevel hierarchy has been constructed!")); - const unsigned int n_proc = - Utilities::MPI::n_mpi_processes(tr->get_communicator()); - const unsigned int n_global_levels = tr->n_global_levels(); + const unsigned int n_global_levels = tria.n_global_levels(); - // This value will represent the sum over the multigrid - // levels of the maximum number of cells owned by any - // one processesor on that level. - types::global_dof_index work_estimate = 0; + std::vector n_cells_on_levels(n_global_levels); - // Sum of all cells in the multigrid hierarchy - types::global_dof_index total_cells_in_hierarchy = 0; + for (unsigned int lvl = 0; lvl < n_global_levels; ++lvl) + for (const auto &cell : tria.cell_iterators_on_level(lvl)) + if (cell->is_locally_owned_on_level()) + ++n_cells_on_levels[lvl]; - for (int lvl = n_global_levels - 1; lvl >= 0; --lvl) - { - // Number of cells this processor owns on this level - types::global_dof_index n_owned_cells_on_lvl = 0; + return n_cells_on_levels; + } - for (const auto &cell : tr->cell_iterators_on_level(lvl)) - if (cell->is_locally_owned_on_level()) - ++n_owned_cells_on_lvl; - work_estimate += - dealii::Utilities::MPI::max(n_owned_cells_on_lvl, - tr->get_communicator()); - total_cells_in_hierarchy += - dealii::Utilities::MPI::sum(n_owned_cells_on_lvl, - tr->get_communicator()); - } + template + std::vector + local_workload( + const std::vector>> + &trias) + { + const unsigned int n_global_levels = trias.size(); + + std::vector n_cells_on_levels(n_global_levels); + + for (unsigned int lvl = 0; lvl < n_global_levels; ++lvl) + for (const auto &cell : trias[lvl]->active_cell_iterators()) + if (cell->is_locally_owned()) + ++n_cells_on_levels[lvl]; + + return n_cells_on_levels; + } + + + + template + double + workload_imbalance(const Triangulation &tria) + { + return internal::workload_imbalance(local_workload(tria), + tria.get_communicator()); + } + + + + template + double + workload_imbalance( + const std::vector>> + &trias) + { + return internal::workload_imbalance(local_workload(trias), + trias.back()->get_communicator()); + } - const double ideal_work = - total_cells_in_hierarchy / static_cast(n_proc); - workload_imbalance = work_estimate / ideal_work; + + + template + std::vector> + local_vertical_communication_cost(const Triangulation &tria) + { + const unsigned int n_global_levels = tria.n_global_levels(); + + std::vector> + cells(n_global_levels); + + const MPI_Comm communicator = tria.get_communicator(); + + const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator); + + for (unsigned int lvl = 0; lvl < n_global_levels - 1; ++lvl) + for (const auto &cell : tria.cell_iterators_on_level(lvl)) + if (cell->is_locally_owned_on_level() && cell->has_children()) + for (unsigned int i = 0; i < GeometryInfo::max_children_per_cell; + ++i) + { + const auto level_subdomain_id = + cell->child(i)->level_subdomain_id(); + if (level_subdomain_id == my_rank) + ++cells[lvl + 1].first; + else if (level_subdomain_id != numbers::invalid_unsigned_int) + ++cells[lvl + 1].second; + else + AssertThrow(false, ExcNotImplemented()); + 
} + + return cells; + } + + + + template + std::vector> + local_vertical_communication_cost( + const std::vector>> + &trias) + { + const unsigned int n_global_levels = trias.size(); + + std::vector> + cells(n_global_levels); + + const MPI_Comm communicator = trias.back()->get_communicator(); + + const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator); + + for (unsigned int lvl = 0; lvl < n_global_levels - 1; ++lvl) + { + const auto &tria_coarse = *trias[lvl]; + const auto &tria_fine = *trias[lvl + 1]; + + const unsigned int n_coarse_cells = tria_fine.n_global_coarse_cells(); + const unsigned int n_global_levels = tria_fine.n_global_levels(); + + const dealii::internal::CellIDTranslator cell_id_translator( + n_coarse_cells, n_global_levels); + + IndexSet is_fine_owned(cell_id_translator.size()); + IndexSet is_fine_required(cell_id_translator.size()); + + for (const auto &cell : tria_fine.active_cell_iterators()) + if (!cell->is_artificial() && cell->is_locally_owned()) + is_fine_owned.add_index(cell_id_translator.translate(cell)); + + for (const auto &cell : tria_coarse.active_cell_iterators()) + if (!cell->is_artificial() && cell->is_locally_owned()) + { + if (cell->level() + 1u == tria_fine.n_global_levels()) + continue; + + for (unsigned int i = 0; + i < GeometryInfo::max_children_per_cell; + ++i) + is_fine_required.add_index( + cell_id_translator.translate(cell, i)); + } + + std::vector is_fine_required_ranks( + is_fine_required.n_elements()); + + Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload + process(is_fine_owned, + is_fine_required, + communicator, + is_fine_required_ranks, + false); + + Utilities::MPI::ConsensusAlgorithms::Selector< + std::pair, + unsigned int> + consensus_algorithm; + consensus_algorithm.run(process, communicator); + + for (unsigned i = 0; i < is_fine_required.n_elements(); ++i) + if (is_fine_required_ranks[i] == my_rank) + ++cells[lvl + 1].first; + else if (is_fine_required_ranks[i] != numbers::invalid_unsigned_int) + ++cells[lvl + 1].second; } - return workload_imbalance; + return cells; + } + + + + template + double + vertical_communication_efficiency(const Triangulation &tria) + { + return internal::vertical_communication_efficiency( + local_vertical_communication_cost(tria), tria.get_communicator()); + } + + + + template + double + vertical_communication_efficiency( + const std::vector>> + &trias) + { + return internal::vertical_communication_efficiency( + local_vertical_communication_cost(trias), + trias.back()->get_communicator()); } + } // namespace MGTools diff --git a/source/multigrid/mg_tools.inst.in b/source/multigrid/mg_tools.inst.in index f0513a8f5b..55328fcae2 100644 --- a/source/multigrid/mg_tools.inst.in +++ b/source/multigrid/mg_tools.inst.in @@ -199,9 +199,47 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS) max_level_for_coarse_mesh( const Triangulation &tria); + template std::vector + local_workload( + const Triangulation &tria); + + template std::vector + local_workload( + const std::vector>> + &tria); + + template double + workload_imbalance( + const Triangulation &tria); + template double workload_imbalance( + const std::vector>> + &tria); + + template std::vector< + std::pair> + local_vertical_communication_cost( const Triangulation &tria); + + template std::vector< + std::pair> + local_vertical_communication_cost( + const std::vector>> + &tria); + + template double + vertical_communication_efficiency( + const Triangulation &tria); + + template double + 
vertical_communication_efficiency( + const std::vector>> + &tria); #endif \} } diff --git a/tests/multigrid/mg_stats_01.cc b/tests/multigrid/mg_stats_01.cc new file mode 100644 index 0000000000..65de32e0df --- /dev/null +++ b/tests/multigrid/mg_stats_01.cc @@ -0,0 +1,123 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2022 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + +// Test geometric metrics from MGTools. + +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "../tests.h" + +template +void +create_quadrant(Triangulation &tria, const unsigned int n_refinements) +{ + GridGenerator::subdivided_hyper_cube(tria, 2, -1.0, +1.0); + + if (n_refinements == 0) + return; + + for (unsigned int i = 0; i < n_refinements; i++) + { + for (auto cell : tria.active_cell_iterators()) + if (cell->is_locally_owned()) + { + bool flag = true; + for (int d = 0; d < dim; d++) + if (cell->center()[d] > 0.0) + flag = false; + if (flag) + cell->set_refine_flag(); + } + tria.execute_coarsening_and_refinement(); + } +} + + +template +void +print_stats(const T &tria) +{ + deallog << "Workload efficiency: " + << 1.0 / MGTools::workload_imbalance(tria) << std::endl; + + const auto local_workload = MGTools::local_workload(tria); + const auto local_vertical_communication_cost = + MGTools::local_vertical_communication_cost(tria); + + deallog << "Local workload: "; + for (unsigned i = 0; i < local_workload.size(); ++i) + deallog << local_workload[i] << " "; + deallog << std::endl; + + deallog << "Vertical communication efficiency: " + << MGTools::vertical_communication_efficiency(tria) << std::endl; + + deallog << "Vertical communication costs: "; + for (unsigned i = 0; i < local_workload.size(); ++i) + deallog << "(" << local_vertical_communication_cost[i].first << ", " + << local_vertical_communication_cost[i].second << ") "; + deallog << std::endl; +} + + +template +void +test() +{ + parallel::distributed::Triangulation tria( + MPI_COMM_WORLD, + Triangulation::limit_level_difference_at_vertices, + parallel::distributed::Triangulation::construct_multigrid_hierarchy); + + create_quadrant(tria, 2); + + print_stats(tria); + + const RepartitioningPolicyTools::FirstChildPolicy policy(tria); + const auto trias = + MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence( + tria, policy); + + print_stats(trias); +} + + + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll all; + + deallog.push("2d"); + test<2>(); + deallog.pop(); +} diff --git a/tests/multigrid/mg_stats_01.with_p4est=true.mpirun=3.output b/tests/multigrid/mg_stats_01.with_p4est=true.mpirun=3.output new file mode 100644 index 0000000000..b7979df431 --- /dev/null +++ b/tests/multigrid/mg_stats_01.with_p4est=true.mpirun=3.output @@ -0,0 +1,29 @@ + +DEAL:0:2d::Workload efficiency: 0.666667 +DEAL:0:2d::Local workload: 1 2 8 +DEAL:0:2d::Vertical 
communication efficiency: 0.937500
+DEAL:0:2d::Vertical communication costs: (0, 0) (2, 2) (8, 0)
+DEAL:0:2d::Workload efficiency: 0.764706
+DEAL:0:2d::Local workload: 1 2 8
+DEAL:0:2d::Vertical communication efficiency: 0.937500
+DEAL:0:2d::Vertical communication costs: (0, 0) (2, 2) (8, 0)
+
+DEAL:1:2d::Workload efficiency: 0.666667
+DEAL:1:2d::Local workload: 1 6 8
+DEAL:1:2d::Vertical communication efficiency: 0.937500
+DEAL:1:2d::Vertical communication costs: (0, 0) (4, 0) (8, 0)
+DEAL:1:2d::Workload efficiency: 0.764706
+DEAL:1:2d::Local workload: 1 3 12
+DEAL:1:2d::Vertical communication efficiency: 0.937500
+DEAL:1:2d::Vertical communication costs: (0, 0) (0, 0) (12, 0)
+
+
+DEAL:2:2d::Workload efficiency: 0.666667
+DEAL:2:2d::Local workload: 2 8 0
+DEAL:2:2d::Vertical communication efficiency: 0.937500
+DEAL:2:2d::Vertical communication costs: (0, 0) (8, 0) (0, 0)
+DEAL:2:2d::Workload efficiency: 0.764706
+DEAL:2:2d::Local workload: 2 2 8
+DEAL:2:2d::Vertical communication efficiency: 0.937500
+DEAL:2:2d::Vertical communication costs: (0, 0) (0, 0) (8, 0)
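
The numbers in the expected output can be reproduced by hand from the formulas introduced in MGTools::internal: the workload imbalance is the sum over levels of the per-level maximum of locally-owned cells, divided by the total cell count of the hierarchy over the number of ranks, and the vertical communication efficiency is the fraction of fine cells whose owning rank matches that of their parent cell. Below is a minimal standalone C++ sketch (not part of the patch; serial, no MPI, with the per-rank data copied from the three-rank test output above) that reproduces the reported values 0.666667 and 0.937500.

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
  // Locally-owned cells per level, per rank, copied from the
  // "Local workload" lines of the parallel::distributed case above.
  const std::vector<std::vector<unsigned int>> workload = {{1, 2, 8},
                                                           {1, 6, 8},
                                                           {2, 8, 0}};

  double work_estimate = 0.; // sum over levels of the per-level maximum
  double total_cells   = 0.; // total number of cells in the hierarchy
  for (unsigned int lvl = 0; lvl < workload[0].size(); ++lvl)
    {
      unsigned int level_max = 0;
      for (const auto &ranks_cells : workload)
        {
          level_max = std::max(level_max, ranks_cells[lvl]);
          total_cells += ranks_cells[lvl];
        }
      work_estimate += level_max;
    }
  const double ideal_work = total_cells / workload.size();   // 36 / 3 = 12
  const double imbalance  = work_estimate / ideal_work;      // 18 / 12 = 1.5
  std::printf("workload efficiency: %f\n", 1. / imbalance);  // 0.666667

  // (local, remote) pairs summed over all ranks and levels, copied from the
  // "Vertical communication costs" lines: 30 children stay on the parent's
  // rank, 2 end up on another rank.
  const double n_local = 2 + 8 + 4 + 8 + 8, n_remote = 2;
  std::printf("vertical communication efficiency: %f\n",
              n_local / (n_local + n_remote)); // 30 / 32 = 0.937500
}

The same arithmetic applied to the global-coarsening workloads (1 2 8, 1 3 12, 2 2 8) gives 13/17, i.e. the 0.764706 reported in the second set of lines.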