From: David Wells
Date: Wed, 27 Feb 2019 22:13:25 +0000 (-0500)
Subject: Fix a bug in make_sparsity_pattern(dh, dh, sp).
X-Git-Tag: v9.1.0-rc1~278^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1ee851f4cbec17bb980c845ca415721d8598ec04;p=dealii.git

Fix a bug in make_sparsity_pattern(dh, dh, sp).

This function relied on GridTools::get_finest_common_cells, which was
written before any of the parallel triangulations were implemented and
thus assumed that all cells without children were active. This commit
modifies that function to only return active cells that are also
locally owned, which fixes the sparsity pattern function.
---

diff --git a/doc/news/changes/minor/20190304DavidWells b/doc/news/changes/minor/20190304DavidWells
new file mode 100644
index 0000000000..b0cd08c43a
--- /dev/null
+++ b/doc/news/changes/minor/20190304DavidWells
@@ -0,0 +1,5 @@
+Fixed: the DoFTools::make_sparsity_pattern variant that takes, as arguments, two
+different DoFHandler objects now works correctly with
+parallel::shared::Triangulation and parallel::distributed::Triangulation.
+
+(David Wells, 2019/03/04)
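The heart of the bug: deal.II defines an "active" cell as one without
children, but a parallel::distributed::Triangulation stores only the
locally owned cells, their ghost neighbors, and a coarsened "artificial"
view of the rest of the domain, so a cell with no children on one
process may well be refined on another. Code that pairs up childless
cells therefore has to check ownership explicitly. A minimal standalone
sketch of that check, not part of the patch (the helper name
count_locally_owned_active_cells is illustrative):

    #include <deal.II/distributed/tria.h>

    using namespace dealii;

    // Count only the active cells this process owns; is_locally_owned()
    // is false for ghost and artificial cells, which carry no reliable
    // refinement information on this process.
    template <int dim>
    unsigned int count_locally_owned_active_cells(
      const parallel::distributed::Triangulation<dim> &tria)
    {
      unsigned int n = 0;
      for (const auto &cell : tria.active_cell_iterators())
        if (cell->is_locally_owned())
          ++n;
      return n;
    }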
diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h
index 00e434cc3a..df3711d54f 100644
--- a/include/deal.II/grid/grid_tools.h
+++ b/include/deal.II/grid/grid_tools.h
@@ -2018,6 +2018,11 @@ namespace GridTools
   *
   * @tparam MeshType A type that satisfies the requirements of the
   * @ref ConceptMeshType "MeshType concept".
+  *
+  * @note This function can only be used with
+  * parallel::distributed::Triangulation when both meshes use the same
+  * Triangulation since, with a distributed Triangulation, not all cells are
+  * stored locally, so the resulting list may not cover the entire domain.
   */
  template <typename MeshType>
  std::list<std::pair<typename MeshType::cell_iterator,
diff --git a/source/dofs/dof_tools_sparsity.cc b/source/dofs/dof_tools_sparsity.cc
--- a/source/dofs/dof_tools_sparsity.cc
+++ b/source/dofs/dof_tools_sparsity.cc
@@ ... @@
 #include <deal.II/base/table.h>
 
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/distributed/tria.h>
+
 #include <deal.II/dofs/dof_accessor.h>
 #include <deal.II/dofs/dof_handler.h>
 #include <deal.II/dofs/dof_tools.h>
@@ -222,23 +225,59 @@ namespace DoFTools
     Assert(sparsity.n_cols() == n_dofs_col,
            ExcDimensionMismatch(sparsity.n_cols(), n_dofs_col));
 
-    // TODO: Looks like wasteful memory management here
-
-    const std::list<std::pair<typename DoFHandlerType::cell_iterator,
-                              typename DoFHandlerType::cell_iterator>>
-      cell_list = GridTools::get_finest_common_cells(dof_row, dof_col);
+    // It doesn't make sense to assemble sparsity patterns when the
+    // Triangulations are both parallel (i.e., different cells are assigned to
+    // different processors) and unequal: no processor will be responsible for
+    // assembling coupling terms between dofs on a cell owned by one processor
+    // and dofs on a cell owned by a different processor.
+    constexpr int dim      = DoFHandlerType::dimension;
+    constexpr int spacedim = DoFHandlerType::space_dimension;
+    if (dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>
+          *>(&dof_row.get_triangulation()) != nullptr ||
+        dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>
+          *>(&dof_col.get_triangulation()) != nullptr)
+      {
+        Assert(&dof_row.get_triangulation() == &dof_col.get_triangulation(),
+               ExcMessage("This function can only be used with parallel "
+                          "Triangulations when the Triangulations are equal."));
+      }
 
+    // TODO: Looks like wasteful memory management here
-    typename std::list<std::pair<typename DoFHandlerType::cell_iterator,
-                                 typename DoFHandlerType::cell_iterator>>::
-      const_iterator cell_iter = cell_list.begin();
+    using cell_iterator = typename DoFHandlerType::cell_iterator;
+    std::list<std::pair<cell_iterator, cell_iterator>> cell_list =
+      GridTools::get_finest_common_cells(dof_row, dof_col);
+
+#ifdef DEAL_II_WITH_MPI
+    // get_finest_common_cells returns all cells (locally owned and otherwise)
+    // for shared::Tria, but we don't want to assemble on cells that are not
+    // locally owned so remove them
+    if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
+          &dof_row.get_triangulation()) != nullptr ||
+        dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
+          &dof_col.get_triangulation()) != nullptr)
+      {
+        const types::subdomain_id this_subdomain_id =
+          dof_row.get_triangulation().locally_owned_subdomain();
+        Assert(this_subdomain_id ==
+                 dof_col.get_triangulation().locally_owned_subdomain(),
+               ExcInternalError());
+        cell_list.erase(
+          std::remove_if(
+            cell_list.begin(),
+            cell_list.end(),
+            [=](const std::pair<cell_iterator, cell_iterator> &pair) {
+              return pair.first->subdomain_id() != this_subdomain_id ||
+                     pair.second->subdomain_id() != this_subdomain_id;
+            }),
+          cell_list.end());
+      }
+#endif
 
-    for (; cell_iter != cell_list.end(); ++cell_iter)
+    for (const auto &cell_pair : cell_list)
       {
-        const typename DoFHandlerType::cell_iterator cell_row =
-          cell_iter->first;
-        const typename DoFHandlerType::cell_iterator cell_col =
-          cell_iter->second;
+        const cell_iterator cell_row = cell_pair.first;
+        const cell_iterator cell_col = cell_pair.second;
 
         if (!cell_row->has_children() && !cell_col->has_children())
           {
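A note on the list-filtering idiom the new code uses: std::remove_if
only requires forward iterators, so the classic erase(remove_if(...),
end()) pattern works on a std::list as well, although the member
std::list::remove_if would avoid shuffling elements. A self-contained
sketch of the same predicate logic, with plain ints standing in for the
cells' subdomain ids (not part of the patch):

    #include <algorithm>
    #include <cassert>
    #include <list>
    #include <utility>

    int main()
    {
      // Each pair plays the role of a (row cell, column cell) pair; the
      // ints stand in for the two cells' subdomain ids.
      std::list<std::pair<int, int>> pairs = {{0, 0}, {0, 1}, {1, 1}};
      const int this_subdomain_id = 0;

      // Drop every pair in which either cell belongs to another process,
      // mirroring what the patch does for shared Triangulations.
      pairs.erase(std::remove_if(pairs.begin(),
                                 pairs.end(),
                                 [=](const std::pair<int, int> &pair) {
                                   return pair.first != this_subdomain_id ||
                                          pair.second != this_subdomain_id;
                                 }),
                  pairs.end());

      assert(pairs.size() == 1 && pairs.front() == std::make_pair(0, 0));
      return 0;
    }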
diff --git a/source/grid/grid_tools_dof_handlers.cc b/source/grid/grid_tools_dof_handlers.cc
index 208ece7de1..ba09b68e1d 100644
--- a/source/grid/grid_tools_dof_handlers.cc
+++ b/source/grid/grid_tools_dof_handlers.cc
@@ -1135,6 +1135,30 @@ namespace GridTools
     Assert(have_same_coarse_mesh(mesh_1, mesh_2),
            ExcMessage("The two meshes must represent triangulations that "
                       "have the same coarse meshes"));
+    // We will allow the output to contain ghost cells when we have shared
+    // Triangulations (i.e., so that each processor will get exactly the same
+    // list of cell pairs), but not when we have two distributed
+    // Triangulations (so that all active cells are partitioned by processor).
+    // Non-parallel Triangulations have no ghost or artificial cells, so they
+    // work the same way as shared Triangulations here.
+    bool remove_ghost_cells = false;
+#ifdef DEAL_II_WITH_MPI
+    {
+      constexpr int dim      = MeshType::dimension;
+      constexpr int spacedim = MeshType::space_dimension;
+      if (dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>
+            *>(&mesh_1.get_triangulation()) != nullptr ||
+          dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>
+            *>(&mesh_2.get_triangulation()) != nullptr)
+        {
+          Assert(&mesh_1.get_triangulation() == &mesh_2.get_triangulation(),
+                 ExcMessage("This function can only be used with meshes "
+                            "corresponding to distributed Triangulations when "
+                            "both Triangulations are equal."));
+          remove_ghost_cells = true;
+        }
+    }
+#endif
 
     // the algorithm goes as follows: first, we fill a list with pairs of
     // iterators common to the two meshes on the coarsest level. then we
@@ -1172,14 +1196,27 @@ namespace GridTools
               // erasing an iterator keeps other iterators valid, so already
               // advance the present iterator by one and then delete the element
               // we've visited before
-              const typename CellList::iterator previous_cell_pair = cell_pair;
+              const auto previous_cell_pair = cell_pair;
               ++cell_pair;
-
               cell_list.erase(previous_cell_pair);
             }
           else
-            // both cells are active, do nothing
-            ++cell_pair;
+            {
+              // at least one cell is active
+              if (remove_ghost_cells &&
+                  ((cell_pair->first->active() &&
+                    !cell_pair->first->is_locally_owned()) ||
+                   (cell_pair->second->active() &&
+                    !cell_pair->second->is_locally_owned())))
+                {
+                  // we only exclude ghost cells for distributed Triangulations
+                  const auto previous_cell_pair = cell_pair;
+                  ++cell_pair;
+                  cell_list.erase(previous_cell_pair);
+                }
+              else
+                ++cell_pair;
+            }
         }
 
       // just to make sure everything is ok, validate that all pairs have at
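To see the fixed overload in action before reading the new tests, here
is a serial sketch of assembling a rectangular coupling pattern between
two element spaces on one mesh (names like dh_row and dh_col are
illustrative; the parallel tests below add the MPI setup and ownership
bookkeeping):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/dofs/dof_tools.h>
    #include <deal.II/fe/fe_dgq.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <deal.II/lac/dynamic_sparsity_pattern.h>

    using namespace dealii;

    int main()
    {
      // Two different finite element spaces on the same triangulation.
      Triangulation<2> tria;
      GridGenerator::hyper_cube(tria);
      tria.refine_global(2);

      FE_DGQ<2>     fe_row(0);
      FE_Q<2>       fe_col(1);
      DoFHandler<2> dh_row(tria);
      DoFHandler<2> dh_col(tria);
      dh_row.distribute_dofs(fe_row);
      dh_col.distribute_dofs(fe_col);

      // One row per piecewise-constant dof, one column per continuous
      // dof; this is the two-DoFHandler overload the commit fixes.
      DynamicSparsityPattern dsp(dh_row.n_dofs(), dh_col.n_dofs());
      DoFTools::make_sparsity_pattern(dh_row, dh_col, dsp);
      return 0;
    }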
diff --git a/tests/dofs/sparsity_pattern_06.cc b/tests/dofs/sparsity_pattern_06.cc
new file mode 100644
index 0000000000..3a98472f1c
--- /dev/null
+++ b/tests/dofs/sparsity_pattern_06.cc
@@ -0,0 +1,147 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Check that we can create a sparsity pattern from two DoFHandlers with the
+// same distributed mesh. This previously failed since the implementation
+// called a GridTools function that assumed all cells without children were
+// active (which is not the case for distributed meshes).
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_cartesian.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include "../tests.h"
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
+  GridGenerator::subdivided_hyper_rectangle(triangulation,
+                                            {4u, 1u},
+                                            {0.0, 0.0},
+                                            {4.0, 1.0});
+  triangulation.refine_global(1);
+  deallog << "global number of active cells: "
+          << triangulation.n_global_active_cells() << std::endl;
+  deallog << "local number of active cells: " << triangulation.n_active_cells()
+          << std::endl;
+  deallog << "number of locally owned active cells: "
+          << triangulation.n_locally_owned_active_cells() << std::endl;
+
+  FE_DGQ<2>     fe_0(0);
+  DoFHandler<2> dof_handler_0;
+  FE_Q<2>       fe_1(1);
+  DoFHandler<2> dof_handler_1;
+  dof_handler_0.initialize(triangulation, fe_0);
+  dof_handler_1.initialize(triangulation, fe_1);
+
+  IndexSet locally_relevant_dofs_0;
+  IndexSet locally_relevant_dofs_1;
+  DoFTools::extract_locally_relevant_dofs(dof_handler_0,
+                                          locally_relevant_dofs_0);
+  DoFTools::extract_locally_relevant_dofs(dof_handler_1,
+                                          locally_relevant_dofs_1);
+  deallog << "locally owned dofs 0: ";
+  dof_handler_0.locally_owned_dofs().print(deallog);
+  deallog << std::endl;
+  deallog << "locally owned dofs 1: ";
+  dof_handler_1.locally_owned_dofs().print(deallog);
+  deallog << std::endl;
+  deallog << "locally relevant dofs 0: ";
+  locally_relevant_dofs_0.print(deallog);
+  deallog << std::endl;
+  deallog << "locally relevant dofs 1: ";
+  locally_relevant_dofs_1.print(deallog);
+  deallog << std::endl;
+
+#if 0 // reenable this to see where the DoFs are
+  MappingCartesian<2> mapping;
+  {
+    deallog << "support points of dofs 0:" << std::endl;
+    std::vector<types::global_dof_index> cell_dofs(fe_0.dofs_per_cell);
+    std::size_t cell_n = 0;
+    for (const auto &cell : dof_handler_0.active_cell_iterators())
+      {
+        if (cell->is_locally_owned())
+          {
+            deallog << "cell_n = " << cell_n << std::endl;
+            const std::vector<Point<2>> &support_points = fe_0.get_unit_support_points();
+            cell->get_dof_indices(cell_dofs);
+            for (std::size_t dof_n = 0; dof_n < fe_0.dofs_per_cell; ++dof_n)
+              {
+                deallog << "dof " << std::setw(2) << cell_dofs[dof_n] << ": "
+                        << mapping.transform_unit_to_real_cell(cell, support_points[dof_n])
+                        << std::endl;
+              }
+            ++cell_n;
+          }
+      }
+  }
+
+  {
+    deallog << "support points of dofs 1:" << std::endl;
+    std::vector<types::global_dof_index> cell_dofs(fe_1.dofs_per_cell);
+    std::size_t cell_n = 0;
+    for (const auto &cell : dof_handler_1.active_cell_iterators())
+      {
+        if (cell->is_locally_owned())
+          {
+            deallog << "cell_n = " << cell_n << std::endl;
+            const std::vector<Point<2>> &support_points = fe_1.get_unit_support_points();
+            cell->get_dof_indices(cell_dofs);
+            for (std::size_t dof_n = 0; dof_n < fe_1.dofs_per_cell; ++dof_n)
+              {
+                deallog << "dof " << std::setw(2) << cell_dofs[dof_n] << ": "
+                        << mapping.transform_unit_to_real_cell(cell, support_points[dof_n])
+                        << std::endl;
+              }
+            ++cell_n;
+          }
+      }
+  }
+#endif
+
+  DynamicSparsityPattern dynamic_sparsity_pattern(
+    dof_handler_0.locally_owned_dofs().size(),
+    dof_handler_1.locally_owned_dofs().size(),
+    dof_handler_0.locally_owned_dofs());
+  DoFTools::make_sparsity_pattern(dof_handler_0,
+                                  dof_handler_1,
+                                  dynamic_sparsity_pattern);
+  dynamic_sparsity_pattern.print(deallog.get_file_stream());
+  SparsityTools::distribute_sparsity_pattern(
+    dynamic_sparsity_pattern,
+    dof_handler_0.n_locally_owned_dofs_per_processor(),
+    MPI_COMM_WORLD,
+    dof_handler_0.locally_owned_dofs());
+  dynamic_sparsity_pattern.print(deallog.get_file_stream());
+
+  deallog << "OK" << std::endl;
+}
diff --git a/tests/dofs/sparsity_pattern_06.with_p4est=on.mpirun=1.output b/tests/dofs/sparsity_pattern_06.with_p4est=on.mpirun=1.output
new file mode 100644
index 0000000000..64245124db
--- /dev/null
+++ b/tests/dofs/sparsity_pattern_06.with_p4est=on.mpirun=1.output
@@ -0,0 +1,45 @@
+
+DEAL:0::global number of active cells: 16
+DEAL:0::local number of active cells: 16
+DEAL:0::number of locally owned active cells: 16
+DEAL:0::locally owned dofs 0: {[0,15]}
+DEAL:0::
+DEAL:0::locally owned dofs 1: {[0,26]}
+DEAL:0::
+DEAL:0::locally relevant dofs 0: {[0,15]}
+DEAL:0::
+DEAL:0::locally relevant dofs 1: {[0,26]}
+DEAL:0::
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+DEAL:0::OK
diff --git a/tests/dofs/sparsity_pattern_06.with_p4est=on.mpirun=2.output b/tests/dofs/sparsity_pattern_06.with_p4est=on.mpirun=2.output
new file mode 100644
index 0000000000..f715e4d774
--- /dev/null
+++ b/tests/dofs/sparsity_pattern_06.with_p4est=on.mpirun=2.output
@@ -0,0 +1,59 @@
+
+DEAL:0::global number of active cells: 16
+DEAL:0::local number of active cells: 13
+DEAL:0::number of locally owned active cells: 8
+DEAL:0::locally owned dofs 0: {[0,7]}
+DEAL:0::
+DEAL:0::locally owned dofs 1: {[0,14]}
+DEAL:0::
+DEAL:0::locally relevant dofs 0: {[0,8], 10}
+DEAL:0::
+DEAL:0::locally relevant dofs 1: {[0,16], 19}
+DEAL:0::
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+DEAL:0::OK
+
+DEAL:1::global number of active cells: 16
+DEAL:1::local number of active cells: 13
+DEAL:1::number of locally owned active cells: 8
+DEAL:1::locally owned dofs 0: {[8,15]}
+DEAL:1::
+DEAL:1::locally owned dofs 1: {[15,26]}
+DEAL:1::
+DEAL:1::locally relevant dofs 0: {5, [7,15]}
+DEAL:1::
+DEAL:1::locally relevant dofs 1: {[9,26]}
+DEAL:1::
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+DEAL:1::OK
+
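A note for reading these expected-output files: each "[i,j,k,...]" line
is one row as printed by DynamicSparsityPattern, with the first number
being the row index and the rest the column indices of that row's
nonzero entries. The pattern is printed once before and once after
SparsityTools::distribute_sparsity_pattern(); the two dumps agree here
since every row a process prints is a row it already owns.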
diff --git a/tests/dofs/sparsity_pattern_07.cc b/tests/dofs/sparsity_pattern_07.cc
new file mode 100644
index 0000000000..53d154bf20
--- /dev/null
+++ b/tests/dofs/sparsity_pattern_07.cc
@@ -0,0 +1,164 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Check that we can create a sparsity pattern from two DoFHandlers with the
+// same parallel mesh. This previously failed since the implementation
+// called a GridTools function that assumed all cells without children were
+// active (which is not the case for distributed meshes).
+//
+// This is the same as sparsity_pattern_06 but checks with a shared
+// triangulation instead of a distributed one.
+
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_cartesian.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include "../tests.h"
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  parallel::shared::Triangulation<2> triangulation(MPI_COMM_WORLD);
+  GridGenerator::subdivided_hyper_rectangle(triangulation,
+                                            {4u, 1u},
+                                            {0.0, 0.0},
+                                            {4.0, 1.0});
+  triangulation.refine_global(1);
+  const unsigned int n_processes =
+    Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Assert(n_processes == 1 || n_processes == 2, ExcInternalError());
+  if (n_processes == 2)
+    {
+      for (auto &cell : triangulation.active_cell_iterators())
+        {
+          if (cell->center()[0] < 2.0)
+            cell->set_subdomain_id(0);
+          else
+            cell->set_subdomain_id(1);
+        }
+    }
+
+  deallog << "global number of active cells: "
+          << triangulation.n_global_active_cells() << std::endl;
+  deallog << "local number of active cells: " << triangulation.n_active_cells()
+          << std::endl;
+  deallog << "number of locally owned active cells: "
+          << triangulation.n_locally_owned_active_cells() << std::endl;
+
+  FE_DGQ<2>     fe_0(0);
+  DoFHandler<2> dof_handler_0;
+  FE_Q<2>       fe_1(1);
+  DoFHandler<2> dof_handler_1;
+  dof_handler_0.initialize(triangulation, fe_0);
+  dof_handler_1.initialize(triangulation, fe_1);
+
+  IndexSet locally_relevant_dofs_0;
+  IndexSet locally_relevant_dofs_1;
+  DoFTools::extract_locally_relevant_dofs(dof_handler_0,
+                                          locally_relevant_dofs_0);
+  DoFTools::extract_locally_relevant_dofs(dof_handler_1,
+                                          locally_relevant_dofs_1);
+  deallog << "locally owned dofs 0: ";
+  dof_handler_0.locally_owned_dofs().print(deallog);
+  deallog << std::endl;
+  deallog << "locally owned dofs 1: ";
+  dof_handler_1.locally_owned_dofs().print(deallog);
+  deallog << std::endl;
+  deallog << "locally relevant dofs 0: ";
+  locally_relevant_dofs_0.print(deallog);
+  deallog << std::endl;
+  deallog << "locally relevant dofs 1: ";
+  locally_relevant_dofs_1.print(deallog);
+  deallog << std::endl;
+
+#if 0 // reenable this to see where the DoFs are
+  MappingCartesian<2> mapping;
+  {
+    deallog << "support points of dofs 0:" << std::endl;
+    std::vector<types::global_dof_index> cell_dofs(fe_0.dofs_per_cell);
+    std::size_t cell_n = 0;
+    for (const auto &cell : dof_handler_0.active_cell_iterators())
+      {
+        if (cell->is_locally_owned())
+          {
+            deallog << "cell_n = " << cell_n << std::endl;
+            const std::vector<Point<2>> &support_points = fe_0.get_unit_support_points();
+            cell->get_dof_indices(cell_dofs);
+            for (std::size_t dof_n = 0; dof_n < fe_0.dofs_per_cell; ++dof_n)
+              {
+                deallog << "dof " << std::setw(2) << cell_dofs[dof_n] << ": "
+                        << mapping.transform_unit_to_real_cell(cell, support_points[dof_n])
+                        << std::endl;
+              }
+            ++cell_n;
+          }
+      }
+  }
+
+  {
+    deallog << "support points of dofs 1:" << std::endl;
+    std::vector<types::global_dof_index> cell_dofs(fe_1.dofs_per_cell);
+    std::size_t cell_n = 0;
+    for (const auto &cell : dof_handler_1.active_cell_iterators())
+      {
+        if (cell->is_locally_owned())
+          {
+            deallog << "cell_n = " << cell_n << std::endl;
+            const std::vector<Point<2>> &support_points = fe_1.get_unit_support_points();
+            cell->get_dof_indices(cell_dofs);
+            for (std::size_t dof_n = 0; dof_n < fe_1.dofs_per_cell; ++dof_n)
+              {
+                deallog << "dof " << std::setw(2) << cell_dofs[dof_n] << ": "
+                        << mapping.transform_unit_to_real_cell(cell, support_points[dof_n])
+                        << std::endl;
+              }
+            ++cell_n;
+          }
+      }
+  }
+#endif
+
+  DynamicSparsityPattern dynamic_sparsity_pattern(
+    dof_handler_0.locally_owned_dofs().size(),
+    dof_handler_1.locally_owned_dofs().size(),
+    dof_handler_0.locally_owned_dofs());
+  DoFTools::make_sparsity_pattern(dof_handler_0,
+                                  dof_handler_1,
+                                  dynamic_sparsity_pattern);
+  dynamic_sparsity_pattern.print(deallog.get_file_stream());
+  SparsityTools::distribute_sparsity_pattern(
+    dynamic_sparsity_pattern,
+    dof_handler_0.n_locally_owned_dofs_per_processor(),
+    MPI_COMM_WORLD,
+    dof_handler_0.locally_owned_dofs());
+  dynamic_sparsity_pattern.print(deallog.get_file_stream());
+
+  deallog << "OK" << std::endl;
+}
diff --git a/tests/dofs/sparsity_pattern_07.mpirun=1.output b/tests/dofs/sparsity_pattern_07.mpirun=1.output
new file mode 100644
index 0000000000..64245124db
--- /dev/null
+++ b/tests/dofs/sparsity_pattern_07.mpirun=1.output
@@ -0,0 +1,45 @@
+
+DEAL:0::global number of active cells: 16
+DEAL:0::local number of active cells: 16
+DEAL:0::number of locally owned active cells: 16
+DEAL:0::locally owned dofs 0: {[0,15]}
+DEAL:0::
+DEAL:0::locally owned dofs 1: {[0,26]}
+DEAL:0::
+DEAL:0::locally relevant dofs 0: {[0,15]}
+DEAL:0::
+DEAL:0::locally relevant dofs 1: {[0,26]}
+DEAL:0::
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+DEAL:0::OK
diff --git a/tests/dofs/sparsity_pattern_07.mpirun=2.output b/tests/dofs/sparsity_pattern_07.mpirun=2.output
new file mode 100644
index 0000000000..3138fa9822
--- /dev/null
+++ b/tests/dofs/sparsity_pattern_07.mpirun=2.output
@@ -0,0 +1,59 @@
+
+DEAL:0::global number of active cells: 16
+DEAL:0::local number of active cells: 16
+DEAL:0::number of locally owned active cells: 8
+DEAL:0::locally owned dofs 0: {[0,7]}
+DEAL:0::
+DEAL:0::locally owned dofs 1: {[0,14]}
+DEAL:0::
+DEAL:0::locally relevant dofs 0: {[0,15]}
+DEAL:0::
+DEAL:0::locally relevant dofs 1: {[0,26]}
+DEAL:0::
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+[0,0,1,2,3]
+[1,1,3,4,5]
+[2,2,3,6,7]
+[3,3,5,7,8]
+[4,4,5,9,10]
+[5,9,10,11,12]
+[6,5,8,10,13]
+[7,10,12,13,14]
+DEAL:0::OK
+
+DEAL:1::global number of active cells: 16
+DEAL:1::local number of active cells: 16
+DEAL:1::number of locally owned active cells: 8
+DEAL:1::locally owned dofs 0: {[8,15]}
+DEAL:1::
+DEAL:1::locally owned dofs 1: {[15,26]}
+DEAL:1::
+DEAL:1::locally relevant dofs 0: {[0,15]}
+DEAL:1::
+DEAL:1::locally relevant dofs 1: {[0,26]}
+DEAL:1::
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+[8,11,12,15,16]
+[9,15,16,17,18]
+[10,12,14,16,19]
+[11,16,18,19,20]
+[12,17,18,21,22]
+[13,21,22,23,24]
+[14,18,20,22,25]
+[15,22,24,25,26]
+DEAL:1::OK
+
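Comparing the mpirun=2 outputs of the two tests makes the difference
between the parallel models visible: the shared Triangulation stores
all 16 active cells on every process (so all dofs are locally
relevant), whereas the distributed Triangulation stores only 13 cells
per process (8 locally owned plus ghost and artificial cells, with the
remote part of the domain coarsened away). That partially stored mesh
is exactly the situation the old get_finest_common_cells mishandled.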
diff --git a/tests/grid/get_finest_common_cells_04.cc b/tests/grid/get_finest_common_cells_04.cc
new file mode 100644
index 0000000000..cab08b18c5
--- /dev/null
+++ b/tests/grid/get_finest_common_cells_04.cc
@@ -0,0 +1,104 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2006 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include "../tests.h"
+
+
+
+template <int dim>
+void
+test()
+{
+  // create 2 triangulations with the
+  // same coarse grid, and refine
+  // them differently
+  parallel::shared::Triangulation<dim> tria_0(MPI_COMM_WORLD);
+  parallel::shared::Triangulation<dim> tria_1(MPI_COMM_WORLD);
+  GridGenerator::hyper_cube(tria_0);
+  GridGenerator::hyper_cube(tria_1);
+
+  tria_0.refine_global(2);
+  tria_1.refine_global(2);
+
+  tria_0.begin_active()->set_refine_flag();
+  tria_0.execute_coarsening_and_refinement();
+
+  tria_1.last_active()->set_refine_flag();
+  tria_1.execute_coarsening_and_refinement();
+
+  tria_1.last_active()->set_refine_flag();
+  tria_1.execute_coarsening_and_refinement();
+
+#if 0 // turn this block back on to get plots of grids and locally owned cells
+  if (dim == 2)
+    {
+      std::string tria_0_out =
+        "tria-0-" + std::to_string(Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)) + ".svg";
+      std::ofstream tria_0_out_stream(tria_0_out);
+      std::string tria_1_out =
+        "tria-1-" + std::to_string(Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)) + ".svg";
+      std::ofstream tria_1_out_stream(tria_1_out);
+      GridOut().write_svg(tria_0, tria_0_out_stream);
+      GridOut().write_svg(tria_1, tria_1_out_stream);
+    }
+
+  deallog << "tria 0 cells:" << std::endl;
+  for (const auto &cell : tria_0.active_cell_iterators())
+    {
+      if (cell->is_locally_owned())
+        deallog << cell << std::endl;
+    }
+
+  deallog << "tria 1 cells:" << std::endl;
+  for (const auto &cell : tria_1.active_cell_iterators())
+    {
+      if (cell->is_locally_owned())
+        deallog << cell << std::endl;
+    }
+#endif
+
+  typedef std::list<std::pair<typename Triangulation<dim>::cell_iterator,
+                              typename Triangulation<dim>::cell_iterator>>
+    CellList;
+
+  deallog << "number of locally owned cells in tria 0 and tria 1: "
+          << tria_0.n_active_cells() << ' ' << tria_1.n_active_cells()
+          << std::endl;
+  const CellList cell_list = GridTools::get_finest_common_cells(tria_0, tria_1);
+  for (typename CellList::const_iterator cell_pair = cell_list.begin();
+       cell_pair != cell_list.end();
+       ++cell_pair)
+    deallog << cell_pair->first << ' ' << cell_pair->second << std::endl;
+}
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  test<2>();
+  test<3>();
+}
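Both triangulations in the test above are refined globally twice and
then refined differently on top of that, so their finest common cells
are precisely the level-2 cells: 16 pairs in 2d and 64 pairs in 3d,
printed as level.index in the expected outputs below. Note that in the
mpirun=2 output both ranks report the same pair list: for shared
Triangulations the patch deliberately keeps ghost cells so that every
processor sees the identical list.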
diff --git a/tests/grid/get_finest_common_cells_04.with_p4est=true.mpirun=1.output b/tests/grid/get_finest_common_cells_04.with_p4est=true.mpirun=1.output
new file mode 100644
index 0000000000..993a4a6e7c
--- /dev/null
+++ b/tests/grid/get_finest_common_cells_04.with_p4est=true.mpirun=1.output
@@ -0,0 +1,83 @@
+
+DEAL:0::number of locally owned cells in tria 0 and tria 1: 19 22
+DEAL:0::2.0 2.0
+DEAL:0::2.1 2.1
+DEAL:0::2.2 2.2
+DEAL:0::2.3 2.3
+DEAL:0::2.4 2.4
+DEAL:0::2.5 2.5
+DEAL:0::2.6 2.6
+DEAL:0::2.7 2.7
+DEAL:0::2.8 2.8
+DEAL:0::2.9 2.9
+DEAL:0::2.10 2.10
+DEAL:0::2.11 2.11
+DEAL:0::2.12 2.12
+DEAL:0::2.13 2.13
+DEAL:0::2.14 2.14
+DEAL:0::2.15 2.15
+DEAL:0::number of locally owned cells in tria 0 and tria 1: 71 78
+DEAL:0::2.0 2.0
+DEAL:0::2.1 2.1
+DEAL:0::2.2 2.2
+DEAL:0::2.3 2.3
+DEAL:0::2.4 2.4
+DEAL:0::2.5 2.5
+DEAL:0::2.6 2.6
+DEAL:0::2.7 2.7
+DEAL:0::2.8 2.8
+DEAL:0::2.9 2.9
+DEAL:0::2.10 2.10
+DEAL:0::2.11 2.11
+DEAL:0::2.12 2.12
+DEAL:0::2.13 2.13
+DEAL:0::2.14 2.14
+DEAL:0::2.15 2.15
+DEAL:0::2.16 2.16
+DEAL:0::2.17 2.17
+DEAL:0::2.18 2.18
+DEAL:0::2.19 2.19
+DEAL:0::2.20 2.20
+DEAL:0::2.21 2.21
+DEAL:0::2.22 2.22
+DEAL:0::2.23 2.23
+DEAL:0::2.24 2.24
+DEAL:0::2.25 2.25
+DEAL:0::2.26 2.26
+DEAL:0::2.27 2.27
+DEAL:0::2.28 2.28
+DEAL:0::2.29 2.29
+DEAL:0::2.30 2.30
+DEAL:0::2.31 2.31
+DEAL:0::2.32 2.32
+DEAL:0::2.33 2.33
+DEAL:0::2.34 2.34
+DEAL:0::2.35 2.35
+DEAL:0::2.36 2.36
+DEAL:0::2.37 2.37
+DEAL:0::2.38 2.38
+DEAL:0::2.39 2.39
+DEAL:0::2.40 2.40
+DEAL:0::2.41 2.41
+DEAL:0::2.42 2.42
+DEAL:0::2.43 2.43
+DEAL:0::2.44 2.44
+DEAL:0::2.45 2.45
+DEAL:0::2.46 2.46
+DEAL:0::2.47 2.47
+DEAL:0::2.48 2.48
+DEAL:0::2.49 2.49
+DEAL:0::2.50 2.50
+DEAL:0::2.51 2.51
+DEAL:0::2.52 2.52
+DEAL:0::2.53 2.53
+DEAL:0::2.54 2.54
+DEAL:0::2.55 2.55
+DEAL:0::2.56 2.56
+DEAL:0::2.57 2.57
+DEAL:0::2.58 2.58
+DEAL:0::2.59 2.59
+DEAL:0::2.60 2.60
+DEAL:0::2.61 2.61
+DEAL:0::2.62 2.62
+DEAL:0::2.63 2.63
diff --git a/tests/grid/get_finest_common_cells_04.with_p4est=true.mpirun=2.output b/tests/grid/get_finest_common_cells_04.with_p4est=true.mpirun=2.output
new file mode 100644
index 0000000000..87c5e343e3
--- /dev/null
+++ b/tests/grid/get_finest_common_cells_04.with_p4est=true.mpirun=2.output
@@ -0,0 +1,167 @@
+
+DEAL:0::number of locally owned cells in tria 0 and tria 1: 19 22
+DEAL:0::2.0 2.0
+DEAL:0::2.1 2.1
+DEAL:0::2.2 2.2
+DEAL:0::2.3 2.3
+DEAL:0::2.4 2.4
+DEAL:0::2.5 2.5
+DEAL:0::2.6 2.6
+DEAL:0::2.7 2.7
+DEAL:0::2.8 2.8
+DEAL:0::2.9 2.9
+DEAL:0::2.10 2.10
+DEAL:0::2.11 2.11
+DEAL:0::2.12 2.12
+DEAL:0::2.13 2.13
+DEAL:0::2.14 2.14
+DEAL:0::2.15 2.15
+DEAL:0::number of locally owned cells in tria 0 and tria 1: 71 78
+DEAL:0::2.0 2.0
+DEAL:0::2.1 2.1
+DEAL:0::2.2 2.2
+DEAL:0::2.3 2.3
+DEAL:0::2.4 2.4
+DEAL:0::2.5 2.5
+DEAL:0::2.6 2.6
+DEAL:0::2.7 2.7
+DEAL:0::2.8 2.8
+DEAL:0::2.9 2.9
+DEAL:0::2.10 2.10
+DEAL:0::2.11 2.11
+DEAL:0::2.12 2.12
+DEAL:0::2.13 2.13
+DEAL:0::2.14 2.14
+DEAL:0::2.15 2.15
+DEAL:0::2.16 2.16
+DEAL:0::2.17 2.17
+DEAL:0::2.18 2.18
+DEAL:0::2.19 2.19
+DEAL:0::2.20 2.20
+DEAL:0::2.21 2.21
+DEAL:0::2.22 2.22
+DEAL:0::2.23 2.23
+DEAL:0::2.24 2.24
+DEAL:0::2.25 2.25
+DEAL:0::2.26 2.26
+DEAL:0::2.27 2.27
+DEAL:0::2.28 2.28
+DEAL:0::2.29 2.29
+DEAL:0::2.30 2.30
+DEAL:0::2.31 2.31
+DEAL:0::2.32 2.32
+DEAL:0::2.33 2.33
+DEAL:0::2.34 2.34
+DEAL:0::2.35 2.35
+DEAL:0::2.36 2.36
+DEAL:0::2.37 2.37
+DEAL:0::2.38 2.38
+DEAL:0::2.39 2.39
+DEAL:0::2.40 2.40
+DEAL:0::2.41 2.41
+DEAL:0::2.42 2.42
+DEAL:0::2.43 2.43
+DEAL:0::2.44 2.44
+DEAL:0::2.45 2.45
+DEAL:0::2.46 2.46
+DEAL:0::2.47 2.47
+DEAL:0::2.48 2.48
+DEAL:0::2.49 2.49
+DEAL:0::2.50 2.50
+DEAL:0::2.51 2.51
+DEAL:0::2.52 2.52
+DEAL:0::2.53 2.53
+DEAL:0::2.54 2.54
+DEAL:0::2.55 2.55
+DEAL:0::2.56 2.56
+DEAL:0::2.57 2.57
+DEAL:0::2.58 2.58
+DEAL:0::2.59 2.59
+DEAL:0::2.60 2.60
+DEAL:0::2.61 2.61
+DEAL:0::2.62 2.62
+DEAL:0::2.63 2.63
+
+DEAL:1::number of locally owned cells in tria 0 and tria 1: 19 22
+DEAL:1::2.0 2.0
+DEAL:1::2.1 2.1
+DEAL:1::2.2 2.2
+DEAL:1::2.3 2.3
+DEAL:1::2.4 2.4
+DEAL:1::2.5 2.5
+DEAL:1::2.6 2.6
+DEAL:1::2.7 2.7
+DEAL:1::2.8 2.8
+DEAL:1::2.9 2.9
+DEAL:1::2.10 2.10
+DEAL:1::2.11 2.11
+DEAL:1::2.12 2.12
+DEAL:1::2.13 2.13
+DEAL:1::2.14 2.14
+DEAL:1::2.15 2.15
+DEAL:1::number of locally owned cells in tria 0 and tria 1: 71 78
+DEAL:1::2.0 2.0
+DEAL:1::2.1 2.1
+DEAL:1::2.2 2.2
+DEAL:1::2.3 2.3
+DEAL:1::2.4 2.4
+DEAL:1::2.5 2.5
+DEAL:1::2.6 2.6
+DEAL:1::2.7 2.7
+DEAL:1::2.8 2.8
+DEAL:1::2.9 2.9
+DEAL:1::2.10 2.10
+DEAL:1::2.11 2.11
+DEAL:1::2.12 2.12
+DEAL:1::2.13 2.13
+DEAL:1::2.14 2.14
+DEAL:1::2.15 2.15
+DEAL:1::2.16 2.16
+DEAL:1::2.17 2.17
+DEAL:1::2.18 2.18
+DEAL:1::2.19 2.19
+DEAL:1::2.20 2.20
+DEAL:1::2.21 2.21
+DEAL:1::2.22 2.22
+DEAL:1::2.23 2.23
+DEAL:1::2.24 2.24
+DEAL:1::2.25 2.25
+DEAL:1::2.26 2.26
+DEAL:1::2.27 2.27
+DEAL:1::2.28 2.28
+DEAL:1::2.29 2.29
+DEAL:1::2.30 2.30
+DEAL:1::2.31 2.31
+DEAL:1::2.32 2.32
+DEAL:1::2.33 2.33
+DEAL:1::2.34 2.34
+DEAL:1::2.35 2.35
+DEAL:1::2.36 2.36
+DEAL:1::2.37 2.37
+DEAL:1::2.38 2.38
+DEAL:1::2.39 2.39
+DEAL:1::2.40 2.40
+DEAL:1::2.41 2.41
+DEAL:1::2.42 2.42
+DEAL:1::2.43 2.43
+DEAL:1::2.44 2.44
+DEAL:1::2.45 2.45
+DEAL:1::2.46 2.46
+DEAL:1::2.47 2.47
+DEAL:1::2.48 2.48
+DEAL:1::2.49 2.49
+DEAL:1::2.50 2.50
+DEAL:1::2.51 2.51
+DEAL:1::2.52 2.52
+DEAL:1::2.53 2.53
+DEAL:1::2.54 2.54
+DEAL:1::2.55 2.55
+DEAL:1::2.56 2.56
+DEAL:1::2.57 2.57
+DEAL:1::2.58 2.58
+DEAL:1::2.59 2.59
+DEAL:1::2.60 2.60
+DEAL:1::2.61 2.61
+DEAL:1::2.62 2.62
+DEAL:1::2.63 2.63
+