From 4064999c007048d2ab18656ac7634aec52be52aa Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Sat, 29 May 2021 14:56:44 +0200
Subject: [PATCH] Add
 TriangulationDescription::Utilities::create_description_from_triangulation()
 version to repartition p:d:T

---
 include/deal.II/grid/tria_description.h       |  20 +
 source/grid/tria_description.cc               | 381 ++++++++++++++++++
 source/grid/tria_description.inst.in          |   5 +
 .../repartitioning_01.cc                      | 113 ++++++
 .../repartitioning_01.mpirun=4.output         |  63 +++
 tests/fullydistributed_grids/tests.h          |   4 +-
 6 files changed, 583 insertions(+), 3 deletions(-)
 create mode 100644 tests/fullydistributed_grids/repartitioning_01.cc
 create mode 100644 tests/fullydistributed_grids/repartitioning_01.mpirun=4.output

diff --git a/include/deal.II/grid/tria_description.h b/include/deal.II/grid/tria_description.h
index a2b38df3b6..2d1ad8f1f5 100644
--- a/include/deal.II/grid/tria_description.h
+++ b/include/deal.II/grid/tria_description.h
@@ -23,6 +23,8 @@
 #include
 #include
 
+#include <deal.II/lac/la_parallel_vector.h>
+
 
 DEAL_II_NAMESPACE_OPEN
 
@@ -446,6 +448,24 @@ namespace TriangulationDescription
         TriangulationDescription::Settings::default_setting,
       const unsigned int my_rank_in = numbers::invalid_unsigned_int);
 
+    /**
+     * Similar to the above function, but the owners of the active cells are
+     * provided by a vector with one entry per active cell (see also
+     * parallel::TriangulationBase::global_active_cell_index_partitioner() and
+     * CellAccessor::global_active_cell_index()). This function can be used to
+     * repartition distributed Triangulation objects.
+     *
+     * @note The communicator is extracted from the Triangulation @p tria.
+     *
+     * @note The multigrid levels are currently not constructed, since
+     *   @p partition only describes the partitioning of the active level.
+     */
+    template <int dim, int spacedim = dim>
+    Description<dim, spacedim>
+    create_description_from_triangulation(
+      const Triangulation<dim, spacedim> &              tria,
+      const LinearAlgebra::distributed::Vector<double> &partition);
+
+
     /**
      * Construct a TriangulationDescription::Description. In contrast
diff --git a/source/grid/tria_description.cc b/source/grid/tria_description.cc
index 5f6ee1e00d..89d9dd6c94 100644
--- a/source/grid/tria_description.cc
+++ b/source/grid/tria_description.cc
@@ -15,6 +15,7 @@
 #include
 #include
+#include
 #include
 
@@ -525,6 +526,386 @@ namespace TriangulationDescription
 #endif
     }
 
+    template <int dim, int spacedim>
+    struct DescriptionTemp
+    {
+      template <class Archive>
+      void
+      serialize(Archive &ar, const unsigned int /*version*/)
+      {
+        ar &coarse_cells;
+        ar &coarse_cell_vertices;
+        ar &coarse_cell_index_to_coarse_cell_id;
+        ar &cell_infos;
+      }
+
+      void
+      merge(const DescriptionTemp<dim, spacedim> &other)
+      {
+        this->cell_infos.resize(
+          std::max(other.cell_infos.size(), this->cell_infos.size()));
+
+        this->coarse_cells.insert(this->coarse_cells.end(),
+                                  other.coarse_cells.begin(),
+                                  other.coarse_cells.end());
+        this->coarse_cell_vertices.insert(this->coarse_cell_vertices.end(),
+                                          other.coarse_cell_vertices.begin(),
+                                          other.coarse_cell_vertices.end());
+        this->coarse_cell_index_to_coarse_cell_id.insert(
+          this->coarse_cell_index_to_coarse_cell_id.end(),
+          other.coarse_cell_index_to_coarse_cell_id.begin(),
+          other.coarse_cell_index_to_coarse_cell_id.end());
+
+        for (unsigned int i = 0; i < this->cell_infos.size(); ++i)
+          this->cell_infos[i].insert(this->cell_infos[i].end(),
+                                     other.cell_infos[i].begin(),
+                                     other.cell_infos[i].end());
+      }
+
+      void
+      reduce()
+      {
+        // make coarse cells unique
+        {
+          std::vector<std::tuple<types::coarse_cell_id,
+                                 dealii::CellData<dim>,
+                                 unsigned int>>
+            temp;
+
+          for (unsigned int i = 0; i < this->coarse_cells.size(); ++i)
+            temp.emplace_back(this->coarse_cell_index_to_coarse_cell_id[i],
+                              this->coarse_cells[i],
+                              i);
+
+          std::sort(temp.begin(), temp.end(), [](const auto &a, const auto &b) {
+            return std::get<0>(a) < std::get<0>(b);
+          });
+          temp.erase(std::unique(temp.begin(),
+                                 temp.end(),
+                                 [](const auto &a, const auto &b) {
+                                   return std::get<0>(a) == std::get<0>(b);
+                                 }),
+                     temp.end());
+          std::sort(temp.begin(), temp.end(), [](const auto &a, const auto &b) {
+            return std::get<2>(a) < std::get<2>(b);
+          });
+
+          this->coarse_cell_index_to_coarse_cell_id.resize(temp.size());
+          this->coarse_cells.resize(temp.size());
+
+          for (unsigned int i = 0; i < temp.size(); ++i)
+            {
+              this->coarse_cell_index_to_coarse_cell_id[i] =
+                std::get<0>(temp[i]);
+              this->coarse_cells[i] = std::get<1>(temp[i]);
+            }
+        }
+
+        // make coarse cell vertices unique
+        {
+          std::sort(this->coarse_cell_vertices.begin(),
+                    this->coarse_cell_vertices.end(),
+                    [](const auto &a, const auto &b) {
+                      return a.first < b.first;
+                    });
+          this->coarse_cell_vertices.erase(
+            std::unique(this->coarse_cell_vertices.begin(),
+                        this->coarse_cell_vertices.end(),
+                        [](const auto &a, const auto &b) {
+                          return a.first == b.first;
+                        }),
+            this->coarse_cell_vertices.end());
+        }
+
+        // make cells unique
+        for (unsigned int i = 0; i < this->cell_infos.size(); ++i)
+          {
+            std::sort(this->cell_infos[i].begin(),
+                      this->cell_infos[i].end(),
+                      [](const auto &a, const auto &b) { return a.id < b.id; });
+            this->cell_infos[i].erase(std::unique(this->cell_infos[i].begin(),
+                                                  this->cell_infos[i].end(),
+                                                  [](const auto &a,
+                                                     const auto &b) {
+                                                    return a.id == b.id;
+                                                  }),
+                                      this->cell_infos[i].end());
+          }
+      }
+
+      Description<dim, spacedim>
+      convert(const MPI_Comm comm,
+              const typename Triangulation<dim, spacedim>::MeshSmoothing
+                mesh_smoothing)
+      {
+        Description<dim, spacedim> description;
+
+        // copy communicator
+        description.comm = comm;
+
+        // set settings (no multigrid levels are set for now)
+        description.settings =
+          TriangulationDescription::Settings::default_setting;
+
+        // use mesh smoothing from base triangulation
+        description.smoothing = mesh_smoothing;
+
+        std::map<unsigned int, unsigned int> map;
+
+        for (unsigned int i = 0; i < this->coarse_cell_vertices.size(); ++i)
+          {
+            description.coarse_cell_vertices.push_back(
+              this->coarse_cell_vertices[i].second);
+            map[this->coarse_cell_vertices[i].first] = i;
+          }
+
+        description.coarse_cells = this->coarse_cells;
+
+        for (auto &cell : description.coarse_cells)
+          for (unsigned int v = 0; v < cell.vertices.size(); ++v)
+            cell.vertices[v] = map[cell.vertices[v]];
+
+        description.coarse_cell_index_to_coarse_cell_id =
+          this->coarse_cell_index_to_coarse_cell_id;
+        description.cell_infos = this->cell_infos;
+
+        return description;
+      }
+
+      std::vector<dealii::CellData<dim>> coarse_cells;
+
+      std::vector<std::pair<unsigned int, Point<spacedim>>>
+        coarse_cell_vertices;
+
+      std::vector<types::coarse_cell_id> coarse_cell_index_to_coarse_cell_id;
+
+      std::vector<std::vector<CellData<dim>>> cell_infos;
+    };
+
+
+
+    template <int dim, int spacedim>
+    void
+    fill_cell_infos(const TriaIterator<CellAccessor<dim, spacedim>> &cell,
+                    std::vector<std::vector<CellData<dim>>> &cell_infos,
+                    const LinearAlgebra::distributed::Vector<double> &partition)
+    {
+      if (cell->user_flag_set())
+        return; // cell has already been added -> nothing to do
+
+      cell->set_user_flag();
+
+      CellData<dim> cell_info;
+
+      // save cell id
+      cell_info.id = cell->id().template to_binary<dim>();
+
+      // save boundary_ids of each face of this cell
+      for (const auto f : cell->face_indices())
+        {
+          types::boundary_id boundary_ind = cell->face(f)->boundary_id();
+          if (boundary_ind != numbers::internal_face_boundary_id)
+            cell_info.boundary_ids.emplace_back(f, boundary_ind);
+        }
+
+      // save manifold id
+      {
+        // ... of cell
+        cell_info.manifold_id = cell->manifold_id();
+
+        // ... of lines
+        if (dim >= 2)
+          for (const auto line : cell->line_indices())
+            cell_info.manifold_line_ids[line] = cell->line(line)->manifold_id();
+
+        // ... of quads
+        if (dim == 3)
+          for (const auto f : cell->face_indices())
+            cell_info.manifold_quad_ids[f] = cell->quad(f)->manifold_id();
+      }
+
+      // subdomain and level subdomain id
+      if (cell->is_active())
+        cell_info.subdomain_id = static_cast<types::subdomain_id>(
+          partition[cell->global_active_cell_index()]);
+      else
+        cell_info.subdomain_id = numbers::artificial_subdomain_id;
+
+      cell_info.level_subdomain_id = numbers::artificial_subdomain_id;
+
+      cell_infos[cell->level()].emplace_back(cell_info);
+
+      if (cell->level() != 0) // proceed with parent
+        fill_cell_infos(cell->parent(), cell_infos, partition);
+    }
+
+
+
+    template <int dim, int spacedim>
+    Description<dim, spacedim>
+    create_description_from_triangulation(
+      const Triangulation<dim, spacedim> &              tria,
+      const LinearAlgebra::distributed::Vector<double> &partition)
+    {
+      // 1) determine the processes that will own the currently locally owned
+      //    active cells
+      const std::vector<unsigned int> relevant_processes = [&]() {
+        std::set<unsigned int> relevant_processes;
+
+        for (unsigned int i = 0; i < partition.local_size(); ++i)
+          relevant_processes.insert(
+            static_cast<unsigned int>(partition.local_element(i)));
+
+        return std::vector<unsigned int>(relevant_processes.begin(),
+                                         relevant_processes.end());
+      }();
+
+      // 2) determine coinciding vertices (important for periodicity)
+      std::map<unsigned int, std::vector<unsigned int>>
+        coinciding_vertex_groups;
+      std::map<unsigned int, unsigned int> vertex_to_coinciding_vertex_group;
+
+      GridTools::collect_coinciding_vertices(tria,
+                                             coinciding_vertex_groups,
+                                             vertex_to_coinciding_vertex_group);
+
+      const auto add_vertices_of_cell_to_vertices_owned_by_locally_owned_cells =
+        [&coinciding_vertex_groups, &vertex_to_coinciding_vertex_group](
+          const auto &       cell,
+          std::vector<bool> &vertices_owned_by_locally_owned_cells) {
+          for (const auto v : cell->vertex_indices())
+            {
+              vertices_owned_by_locally_owned_cells[cell->vertex_index(v)] =
+                true;
+              const auto coinciding_vertex_group =
+                vertex_to_coinciding_vertex_group.find(cell->vertex_index(v));
+              if (coinciding_vertex_group !=
+                  vertex_to_coinciding_vertex_group.end())
+                for (const auto &co_vertex : coinciding_vertex_groups.at(
+                       coinciding_vertex_group->second))
+                  vertices_owned_by_locally_owned_cells[co_vertex] = true;
+            }
+        };
+
+      // 3) create a description for each relevant process (its locally owned
+      //    cells, a layer of ghost cells, and all their parents)
+      std::vector<DescriptionTemp<dim, spacedim>> description_temp(
+        relevant_processes.size());
+
+      for (unsigned int i = 0; i < description_temp.size(); ++i)
+        {
+          const unsigned int proc               = relevant_processes[i];
+          auto &             description_temp_i = description_temp[i];
+          description_temp_i.cell_infos.resize(
+            tria.get_triangulation().n_global_levels());
+
+          // clear user_flags
+          std::vector<bool> old_user_flags;
+          tria.save_user_flags(old_user_flags);
+
+          const_cast<Triangulation<dim, spacedim> &>(tria)
+            .clear_user_flags();
+
+          // mark all vertices attached to locally owned cells that will be
+          // owned by process proc
+          std::vector<bool> vertices_owned_by_locally_owned_cells(
+            tria.n_vertices());
+          for (const auto &cell : tria.active_cell_iterators())
+            if (cell->is_locally_owned() &&
+                static_cast<unsigned int>(
+                  partition[cell->global_active_cell_index()]) == proc)
+              add_vertices_of_cell_to_vertices_owned_by_locally_owned_cells(
+                cell, vertices_owned_by_locally_owned_cells);
+
+          // helper function to determine if a cell is relevant for proc
+          // (i.e., connected via a vertex to one of the cells marked above)
+          const auto is_locally_relevant_on_level = [&](const auto &cell) {
+            for (const auto v : cell->vertex_indices())
+              if (vertices_owned_by_locally_owned_cells[cell->vertex_index(v)])
+                return true;
+            return false;
+          };
+
+          // collect locally relevant cells (including their parents)
+          for (const auto &cell : tria.active_cell_iterators())
+            if (is_locally_relevant_on_level(cell))
+              fill_cell_infos(cell, description_temp_i.cell_infos, partition);
+
+          // collect coarse-grid cells
+          std::vector<bool> vertices_locally_relevant(tria.n_vertices(), false);
+
+          for (const auto &cell : tria.cell_iterators_on_level(0))
+            {
+              if (!cell->user_flag_set())
+                continue;
+
+              // extract cell definition
+              dealii::CellData<dim> cell_data(cell->n_vertices());
+              cell_data.material_id = cell->material_id();
+              cell_data.manifold_id = cell->manifold_id();
+              for (const auto v : cell->vertex_indices())
+                cell_data.vertices[v] = cell->vertex_index(v);
+              description_temp_i.coarse_cells.push_back(cell_data);
+
+              // mark cell vertices as relevant
+              for (const auto v : cell->vertex_indices())
+                vertices_locally_relevant[cell->vertex_index(v)] = true;
+
+              // save translation for coarse grid: lid -> gid
+              description_temp_i.coarse_cell_index_to_coarse_cell_id.push_back(
+                cell->id().get_coarse_cell_id());
+            }
+
+          // collect coarse-grid vertices
+          for (unsigned int i = 0; i < vertices_locally_relevant.size(); ++i)
+            if (vertices_locally_relevant[i])
+              description_temp_i.coarse_cell_vertices.emplace_back(
+                i, tria.get_vertices()[i]);
+
+          // restore flags
+          const_cast<Triangulation<dim, spacedim> &>(tria)
+            .load_user_flags(old_user_flags);
+        }
+
+      // collect the descriptions from all processes that currently own active
+      // cells that will be locally owned by this process and merge them into
+      // a single description
+      DescriptionTemp<dim, spacedim> description_merged;
+
+      dealii::Utilities::MPI::ConsensusAlgorithms::AnonymousProcess<char, char>
+        process([&]() { return relevant_processes; },
+                [&](const unsigned int other_rank,
+                    std::vector<char> &send_buffer) {
+                  const auto ptr = std::find(relevant_processes.begin(),
+                                             relevant_processes.end(),
+                                             other_rank);
+
+                  Assert(ptr != relevant_processes.end(), ExcInternalError());
+
+                  const auto other_rank_index =
+                    std::distance(relevant_processes.begin(), ptr);
+
+                  send_buffer =
+                    dealii::Utilities::pack(description_temp[other_rank_index],
+                                            false);
+                },
+                [&](const unsigned int &,
+                    const std::vector<char> &recv_buffer,
+                    std::vector<char> &) {
+                  description_merged.merge(
+                    dealii::Utilities::unpack<DescriptionTemp<dim, spacedim>>(
+                      recv_buffer, false));
+                });
+
+      dealii::Utilities::MPI::ConsensusAlgorithms::Selector<char, char>(
+        process, tria.get_communicator())
+        .run();
+
+      // remove redundant entries
+      description_merged.reduce();
+
+      // convert to actual description
+      return description_merged.convert(tria.get_communicator(),
+                                        tria.get_mesh_smoothing());
+    }
+
   } // namespace Utilities
 } // namespace TriangulationDescription
diff --git a/source/grid/tria_description.inst.in b/source/grid/tria_description.inst.in
index 9ca20f0157..fca624f43e 100644
--- a/source/grid/tria_description.inst.in
+++ b/source/grid/tria_description.inst.in
@@ -45,6 +45,11 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : DIMENSIONS)
           deal_II_space_dimension>::MeshSmoothing smoothing,
         const TriangulationDescription::Settings);
+
+      template Description<deal_II_dimension, deal_II_space_dimension>
+      create_description_from_triangulation(
+        const Triangulation<deal_II_dimension, deal_II_space_dimension> &tria,
+        const LinearAlgebra::distributed::Vector<double> &partition);
 #endif
     \}
 \}
diff --git a/tests/fullydistributed_grids/repartitioning_01.cc b/tests/fullydistributed_grids/repartitioning_01.cc
new file mode 100644
index 0000000000..f840ea9ba8
--- /dev/null
+++ b/tests/fullydistributed_grids/repartitioning_01.cc
@@ -0,0 +1,113 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test
+// TriangulationDescription::Utilities::create_description_from_triangulation()
+// with repartitioning capabilities.
+
+#include
+
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include "./tests.h"
+
+using namespace dealii;
+
+
+template <int dim>
+LinearAlgebra::distributed::Vector<double>
+partition_distributed_triangulation(const Triangulation<dim> &tria_in,
+                                    const unsigned int        n_partitions)
+{
+  const auto tria =
+    dynamic_cast<const parallel::TriangulationBase<dim> *>(&tria_in);
+
+  Assert(tria, ExcNotImplemented());
+
+  LinearAlgebra::distributed::Vector<double> partition(
+    tria->global_active_cell_index_partitioner().lock());
+
+  for (const auto &cell : tria_in.active_cell_iterators())
+    if (cell->is_locally_owned())
+      partition[cell->global_active_cell_index()] =
+        std::floor(cell->center()[0] * n_partitions);
+
+  partition.update_ghost_values();
+
+  return partition;
+}
+
+
+template <int dim>
+void
+test(const MPI_Comm comm, const unsigned int n_partitions)
+{
+  parallel::distributed::Triangulation<dim> tria(comm);
+  GridGenerator::subdivided_hyper_cube(tria, 4);
+  tria.refine_global(3);
+
+  const auto partition_new =
+    partition_distributed_triangulation(tria, n_partitions);
+
+  // repartition the triangulation according to the new partition vector
+  const auto construction_data =
+    TriangulationDescription::Utilities::create_description_from_triangulation(
+      tria, partition_new);
+
+  parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
+  tria_pft.create_triangulation(construction_data);
+
+  FE_Q<dim>       fe(2);
+  DoFHandler<dim> dof_handler(tria_pft);
+  dof_handler.distribute_dofs(fe);
+
+  // print statistics
+  print_statistics(tria_pft);
+  print_statistics(dof_handler);
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+  deallog.push("all");
+  test<2>(comm, Utilities::MPI::n_mpi_processes(comm));
+  deallog.pop();
+
+  // test that we can eliminate processes
+  deallog.push("reduced");
+  test<2>(comm,
+          std::max<unsigned int>(1, Utilities::MPI::n_mpi_processes(comm) / 2));
+  deallog.pop();
+}
diff --git a/tests/fullydistributed_grids/repartitioning_01.mpirun=4.output b/tests/fullydistributed_grids/repartitioning_01.mpirun=4.output
new file mode 100644
index 0000000000..9360fea8bc
--- /dev/null
+++ b/tests/fullydistributed_grids/repartitioning_01.mpirun=4.output
@@ -0,0 +1,63 @@
+
+DEAL:0:all::n_levels: 4
+DEAL:0:all::n_cells: 456
+DEAL:0:all::n_active_cells: 344
+DEAL:0:all::
+DEAL:0:all::n_dofs: 4225
+DEAL:0:all::n_locally_owned_dofs: 1105
+DEAL:0:all::
+DEAL:0:reduced::n_levels: 4
+DEAL:0:reduced::n_cells: 796
+DEAL:0:reduced::n_active_cells: 600
+DEAL:0:reduced::
+DEAL:0:reduced::n_dofs: 4225
+DEAL:0:reduced::n_locally_owned_dofs: 2145
+DEAL:0:reduced::
+
+DEAL:1:all::n_levels: 4
+DEAL:1:all::n_cells: 572
+DEAL:1:all::n_active_cells: 432
+DEAL:1:all::
+DEAL:1:all::n_dofs: 4225
+DEAL:1:all::n_locally_owned_dofs: 1040
+DEAL:1:all::
+DEAL:1:reduced::n_levels: 4
+DEAL:1:reduced::n_cells: 796
+DEAL:1:reduced::n_active_cells: 600
+DEAL:1:reduced::
+DEAL:1:reduced::n_dofs: 4225
+DEAL:1:reduced::n_locally_owned_dofs: 2080
+DEAL:1:reduced::
+
+
+DEAL:2:all::n_levels: 4
+DEAL:2:all::n_cells: 572
+DEAL:2:all::n_active_cells: 432
+DEAL:2:all::
+DEAL:2:all::n_dofs: 4225
+DEAL:2:all::n_locally_owned_dofs: 1040
+DEAL:2:all::
+DEAL:2:reduced::n_levels: 1
+DEAL:2:reduced::n_cells: 1
+DEAL:2:reduced::n_active_cells: 1
+DEAL:2:reduced::
+DEAL:2:reduced::n_dofs: 4225
+DEAL:2:reduced::n_locally_owned_dofs: 0
+DEAL:2:reduced::
+
+
+DEAL:3:all::n_levels: 4
+DEAL:3:all::n_cells: 456
+DEAL:3:all::n_active_cells: 344
+DEAL:3:all::
+DEAL:3:all::n_dofs: 4225
+DEAL:3:all::n_locally_owned_dofs: 1040
+DEAL:3:all::
+DEAL:3:reduced::n_levels: 1
+DEAL:3:reduced::n_cells: 1
+DEAL:3:reduced::n_active_cells: 1
+DEAL:3:reduced::
+DEAL:3:reduced::n_dofs: 4225
+DEAL:3:reduced::n_locally_owned_dofs: 0
+DEAL:3:reduced::
+
diff --git a/tests/fullydistributed_grids/tests.h b/tests/fullydistributed_grids/tests.h
index d0ae2ca131..4029e7f7b5 100644
--- a/tests/fullydistributed_grids/tests.h
+++ b/tests/fullydistributed_grids/tests.h
@@ -21,9 +21,7 @@
 
 template <int dim, int spacedim>
 void
-print_statistics(
-  const parallel::fullydistributed::Triangulation<dim, spacedim> &tria,
-  bool do_mg = false)
+print_statistics(const Triangulation<dim, spacedim> &tria, bool do_mg = false)
 {
   deallog << "n_levels: " << tria.n_levels() << std::endl;
   deallog << "n_cells: " << tria.n_cells() << std::endl;
-- 
2.39.5
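
For reference, a minimal usage sketch of the new overload, modeled on the repartitioning_01 test added above: it fills a partition vector (one entry per active cell, indexed by CellAccessor::global_active_cell_index(), each entry naming the rank that should own the cell after repartitioning), asks create_description_from_triangulation() for the corresponding construction data, and builds a parallel::fullydistributed::Triangulation from it. The include list and the x-coordinate-based partitioning policy are illustrative assumptions of this sketch, not requirements of the new function.

#include <deal.II/base/mpi.h>

#include <deal.II/distributed/fully_distributed_tria.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_description.h>

#include <deal.II/lac/la_parallel_vector.h>

#include <cmath>

using namespace dealii;

int
main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
  const MPI_Comm                   comm = MPI_COMM_WORLD;

  constexpr int dim = 2;

  // source mesh, partitioned by p4est
  parallel::distributed::Triangulation<dim> tria(comm);
  GridGenerator::subdivided_hyper_cube(tria, 4);
  tria.refine_global(2);

  // one entry per active cell: global active cell index -> new owner rank
  LinearAlgebra::distributed::Vector<double> partition(
    tria.global_active_cell_index_partitioner().lock());

  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      // illustrative policy: slice the unit cube along the x-axis
      partition[cell->global_active_cell_index()] =
        std::floor(cell->center()[0] * n_ranks);
  partition.update_ghost_values();

  // construction data describing the new partition ...
  const auto construction_data =
    TriangulationDescription::Utilities::create_description_from_triangulation(
      tria, partition);

  // ... from which the repartitioned, fully distributed mesh is built
  parallel::fullydistributed::Triangulation<dim> tria_repartitioned(comm);
  tria_repartitioned.create_triangulation(construction_data);
}

Any policy that writes a valid rank into every locally owned entry of the vector works here; ghost entries are made consistent by update_ghost_values() before the vector is handed to the new overload.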