From fe5a9e02a916c5431a22b4aadb1442094f88b109 Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Mon, 18 Sep 2023 11:06:57 +0200
Subject: [PATCH] Changes in p:f:T

---
 include/deal.II/grid/tria_description.h |  39 ++++-
 .../create_manually_01.cc               | 148 ++++++++++++++++++
 .../create_manually_01.mpirun=1.output  |  10 ++
 .../create_manually_01.mpirun=4.output  |  43 +++++
 4 files changed, 239 insertions(+), 1 deletion(-)
 create mode 100644 tests/fullydistributed_grids/create_manually_01.cc
 create mode 100644 tests/fullydistributed_grids/create_manually_01.mpirun=1.output
 create mode 100644 tests/fullydistributed_grids/create_manually_01.mpirun=4.output

diff --git a/include/deal.II/grid/tria_description.h b/include/deal.II/grid/tria_description.h
index e98083245c..6e04aef20f 100644
--- a/include/deal.II/grid/tria_description.h
+++ b/include/deal.II/grid/tria_description.h
@@ -325,6 +325,11 @@ namespace TriangulationDescription
   template <int dim>
   struct CellData
   {
+    /**
+     * Constructor.
+     */
+    CellData();
+
     /**
      * Read or write the data of this object to or from a stream for the
      * purpose of serialization using the [BOOST serialization
@@ -387,9 +392,14 @@ namespace TriangulationDescription
   /**
    * Data used in Triangulation::create_triangulation().
    */
-  template
+  template
   struct Description
   {
+    /**
+     * Constructor.
+     */
+    Description();
+
     /**
      * Read or write the data of this object to or from a stream for the
      * purpose of serialization using the [BOOST serialization
@@ -654,6 +664,23 @@ namespace TriangulationDescription



+  template <int dim>
+  CellData<dim>::CellData()
+    : subdomain_id(numbers::invalid_subdomain_id)
+    , level_subdomain_id(numbers::invalid_subdomain_id)
+    , manifold_id(numbers::flat_manifold_id)
+  {
+    std::fill(id.begin(), id.end(), numbers::invalid_unsigned_int);
+    std::fill(manifold_line_ids.begin(),
+              manifold_line_ids.end(),
+              numbers::flat_manifold_id);
+    std::fill(manifold_quad_ids.begin(),
+              manifold_quad_ids.end(),
+              numbers::flat_manifold_id);
+  }
+
+
+
   template <int dim>
   template <class Archive>
   void
@@ -671,6 +698,16 @@ namespace TriangulationDescription
   }


+
+  template <int dim, int spacedim>
+  Description<dim, spacedim>::Description()
+    : comm(MPI_COMM_NULL)
+    , settings(Settings::default_setting)
+    , smoothing(Triangulation<dim, spacedim>::MeshSmoothing::none)
+  {}
+
+
+
   template <int dim, int spacedim>
   template <class Archive>
   void
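The two constructors added above give every member of CellData and Description a
well-defined "empty" state: invalid subdomain ids and flat manifold ids for the
cell data, and MPI_COMM_NULL, default settings, and no mesh smoothing for the
description. This is what lets the new test below assemble a Description field by
field. A minimal sketch of what these defaults mean in practice (not part of the
patch; the dim = 2 instantiation and the standalone main() are purely
illustrative):

    #include <deal.II/grid/tria_description.h>

    #include <cassert>

    int main()
    {
      using namespace dealii;

      // A default-constructed Description starts from the "empty" values set
      // by the new constructor; the caller then fills in the mesh data.
      TriangulationDescription::Description<2, 2> description;
      assert(description.comm == MPI_COMM_NULL);

      // A default-constructed CellData has invalid subdomain ids and flat
      // manifold ids until they are overwritten, as the test does below.
      TriangulationDescription::CellData<2> cell;
      assert(cell.subdomain_id == numbers::invalid_subdomain_id);
      assert(cell.manifold_id == numbers::flat_manifold_id);
    }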
diff --git a/tests/fullydistributed_grids/create_manually_01.cc b/tests/fullydistributed_grids/create_manually_01.cc
new file mode 100644
index 0000000000..127acf17f9
--- /dev/null
+++ b/tests/fullydistributed_grids/create_manually_01.cc
@@ -0,0 +1,148 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Create a parallel::fullydistributed::Triangulation manually.
+
+#include
+
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "../grid/tests.h"
+
+using namespace dealii;
+
+template <int dim>
+void
+test(MPI_Comm comm)
+{
+  // subdivided hypercube settings
+  std::array<unsigned int, 2> n_subdivision = {{5, 6}};
+  std::array<double, 2>       sizes         = {{1.0, 1.0}};
+
+  // parallel info
+  const auto n_ranks = Utilities::MPI::n_mpi_processes(comm);
+  const auto my_rank = Utilities::MPI::this_mpi_process(comm);
+
+  // create description
+  TriangulationDescription::Description<dim> construction_data;
+  construction_data.comm = comm;
+  construction_data.cell_infos.resize(1);
+
+  const auto is_local =
+    Utilities::create_evenly_distributed_partitioning(my_rank,
+                                                      n_ranks,
+                                                      n_subdivision[1]);
+
+  if (!is_local.is_empty())
+    {
+      // 1) collect vertices
+      for (unsigned int j = std::max(0, is_local.nth_index_in_set(0) - 1);
+           j <= std::min(
+                  is_local.nth_index_in_set(is_local.n_elements() - 1) + 3,
+                  n_subdivision[1] + 1);
+           ++j)
+        for (unsigned int i = 0; i <= n_subdivision[0]; ++i)
+          construction_data.coarse_cell_vertices.emplace_back(
+            sizes[0] / n_subdivision[0] * i, sizes[1] / n_subdivision[1] * j);
+
+      // 2) setup cells
+      for (unsigned int j = std::max(0, is_local.nth_index_in_set(0) - 1),
+                        j_local = 0;
+           j < std::min(
+                 is_local.nth_index_in_set(is_local.n_elements() - 1) + 2,
+                 n_subdivision[1]);
+           ++j, ++j_local)
+        for (unsigned int i = 0; i < n_subdivision[0]; ++i)
+          {
+            const unsigned int c_global = i + j * n_subdivision[0];
+
+            // 2a) coarse cell info
+            CellData<dim> cell_data;
+
+            const unsigned int stride = n_subdivision[0] + 1;
+            cell_data.vertices[0]     = (i + 0) + (j_local + 0) * stride;
+            cell_data.vertices[1]     = (i + 1) + (j_local + 0) * stride;
+            cell_data.vertices[2]     = (i + 0) + (j_local + 1) * stride;
+            cell_data.vertices[3]     = (i + 1) + (j_local + 1) * stride;
+
+            construction_data.coarse_cells.emplace_back(cell_data);
+
+            // 2b) local to global cell id map
+            construction_data.coarse_cell_index_to_coarse_cell_id.emplace_back(
+              c_global);
+
+            // 2c) level cell info
+            TriangulationDescription::CellData<dim> cell_data_;
+
+            cell_data_.id = CellId(c_global, {}).template to_binary<dim>();
+            cell_data_.subdomain_id =
+              is_local.is_element(j) ?
+                my_rank :
+                (j < is_local.nth_index_in_set(0) ?
+                   (my_rank - 1) :
+                   (my_rank + 1));
+            cell_data_.level_subdomain_id = cell_data_.subdomain_id;
+
+            if (i == 0)
+              cell_data_.boundary_ids.emplace_back(0, 0);
+            else if (i + 1 == n_subdivision[0])
+              cell_data_.boundary_ids.emplace_back(1, 1);
+            else if (j == 0)
+              cell_data_.boundary_ids.emplace_back(2, 2);
+            else if (j + 1 == n_subdivision[1])
+              cell_data_.boundary_ids.emplace_back(3, 3);
+
+            construction_data.cell_infos[0].emplace_back(cell_data_);
+          }
+    }
+
+  // actually create triangulation
+  parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
+  tria_pft.create_triangulation(construction_data);
+
+  // test triangulation
+  FE_Q<dim>       fe(2);
+  DoFHandler<dim> dof_handler(tria_pft);
+  dof_handler.distribute_dofs(fe);
+
+  // print statistics
+  print_statistics(tria_pft);
+  print_statistics(dof_handler);
+}
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  const MPI_Comm comm = MPI_COMM_WORLD;
+
+  {
+    deallog.push("2d");
+    test<2>(comm);
+    deallog.pop();
+  }
+}
diff --git a/tests/fullydistributed_grids/create_manually_01.mpirun=1.output b/tests/fullydistributed_grids/create_manually_01.mpirun=1.output
new file mode 100644
index 0000000000..63fa59103f
--- /dev/null
+++ b/tests/fullydistributed_grids/create_manually_01.mpirun=1.output
@@ -0,0 +1,10 @@
+
+DEAL:0:2d::n_levels: 1
+DEAL:0:2d::n_cells: 30
+DEAL:0:2d::n_active_cells: 30
+DEAL:0:2d::has_hanging_nodes: false
+DEAL:0:2d::
+DEAL:0:2d::n_dofs: 143
+DEAL:0:2d::n_locally_owned_dofs: 143
+DEAL:0:2d::has_hanging_nodes: false
+DEAL:0:2d::
diff --git a/tests/fullydistributed_grids/create_manually_01.mpirun=4.output b/tests/fullydistributed_grids/create_manually_01.mpirun=4.output
new file mode 100644
index 0000000000..ad2a9e140c
--- /dev/null
+++ b/tests/fullydistributed_grids/create_manually_01.mpirun=4.output
@@ -0,0 +1,43 @@
+
+DEAL:0:2d::n_levels: 1
+DEAL:0:2d::n_cells: 15
+DEAL:0:2d::n_active_cells: 15
+DEAL:0:2d::has_hanging_nodes: false
+DEAL:0:2d::
+DEAL:0:2d::n_dofs: 143
+DEAL:0:2d::n_locally_owned_dofs: 55
+DEAL:0:2d::has_hanging_nodes: false
+DEAL:0:2d::
+
+DEAL:1:2d::n_levels: 1
+DEAL:1:2d::n_cells: 20
+DEAL:1:2d::n_active_cells: 20
+DEAL:1:2d::has_hanging_nodes: false
+DEAL:1:2d::
+DEAL:1:2d::n_dofs: 143
+DEAL:1:2d::n_locally_owned_dofs: 44
+DEAL:1:2d::has_hanging_nodes: false
+DEAL:1:2d::
+
+
+DEAL:2:2d::n_levels: 1
+DEAL:2:2d::n_cells: 15
+DEAL:2:2d::n_active_cells: 15
+DEAL:2:2d::has_hanging_nodes: false
+DEAL:2:2d::
+DEAL:2:2d::n_dofs: 143
+DEAL:2:2d::n_locally_owned_dofs: 22
+DEAL:2:2d::has_hanging_nodes: false
+DEAL:2:2d::
+
+
+DEAL:3:2d::n_levels: 1
+DEAL:3:2d::n_cells: 10
+DEAL:3:2d::n_active_cells: 10
+DEAL:3:2d::has_hanging_nodes: false
+DEAL:3:2d::
+DEAL:3:2d::n_dofs: 143
+DEAL:3:2d::n_locally_owned_dofs: 22
+DEAL:3:2d::has_hanging_nodes: false
+DEAL:3:2d::
+
-- 
2.39.5
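For reference, the per-rank cell counts in the 4-rank output follow directly from
the row-wise partitioning set up in the test: Utilities::create_evenly_distributed_partitioning
splits the 6 rows of cells as 2 + 2 + 1 + 1, and each rank additionally builds one
ghost row of cells on either side of its locally owned rows. A short worked check
(reader's note, not part of the patch):

    5 cells per row, 6 rows of cells
    rank 0: rows 0-1 owned + ghost row 2         -> 3 rows x 5 = 15 cells
    rank 1: rows 2-3 owned + ghost rows 1 and 4  -> 4 rows x 5 = 20 cells
    rank 2: row 4 owned    + ghost rows 3 and 5  -> 3 rows x 5 = 15 cells
    rank 3: row 5 owned    + ghost row 4         -> 2 rows x 5 = 10 cells

    FE_Q(2) on the full 5 x 6 mesh: (2*5 + 1) * (2*6 + 1) = 11 * 13 = 143,
    which is the n_dofs value reported by every rank.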