From 46bb95e8d4a1474a6e7a935588268749b0830c52 Mon Sep 17 00:00:00 2001
From: tcclevenger
Date: Fri, 17 Mar 2017 09:32:39 -0400
Subject: [PATCH] changes 2

---
 include/deal.II/grid/grid_tools.h             | 14 +++--
 source/distributed/shared_tria.cc             | 56 +++----------
 source/grid/grid_tools.cc                     | 40 +++++++++++++
 source/grid/grid_tools.inst.in                |  6 ++
 tests/sharedtria/tria_multigrid_02.cc         | 14 +++--
 .../tria_multigrid_02.mpirun=3.output         | 45 +++++++++++++++
 tests/sharedtria/tria_zorder_02.cc            | 14 +++--
 .../sharedtria/tria_zorder_02.mpirun=3.output | 17 ++++++
 8 files changed, 143 insertions(+), 63 deletions(-)

diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h
index 554fb1347d..952d74a3bf 100644
--- a/include/deal.II/grid/grid_tools.h
+++ b/include/deal.II/grid/grid_tools.h
@@ -715,16 +715,18 @@ namespace GridTools
 
 
   /**
-   * Extract and return the layer of cells around a subdomain on a given level
-   * in the @p mesh (i.e. those that share a common set of
-   * vertices with the level subdomain but are not a part of it).
+   * Extract and return the cell layer around a subdomain (set of
+   * cells) on a specified level of the @p mesh (i.e. those cells on
+   * that level that share a common set of vertices with the subdomain
+   * but are not a part of it). Here, the "subdomain" consists of exactly
+   * all of those cells for which the @p predicate returns @p true.
    */
   template <class MeshType>
   std::vector<typename MeshType::cell_iterator>
   compute_cell_halo_layer_on_level
-  (const MeshType &mesh,
-   const unsigned int my_level_subdomain_id,
-   const unsigned int level);
+  (const MeshType                                                              &mesh,
+   const std_cxx11::function<bool (const typename MeshType::cell_iterator &)> &predicate,
+   const unsigned int                                                           level);
 
 
   /**
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
index fba438e439..31903f812b 100644
--- a/source/distributed/shared_tria.cc
+++ b/source/distributed/shared_tria.cc
@@ -50,50 +50,6 @@ namespace parallel
 
 
 
-    namespace
-    {
-      /**
-       * Helper function for partition() which determines halo
-       * layer cells for a given level
-       */
-      template <int dim, int spacedim>
-      std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
-      compute_cell_halo_layer_on_level
-      (parallel::shared::Triangulation<dim,spacedim> &tria,
-       const std_cxx11::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator &)> &predicate,
-       const unsigned int level)
-      {
-        std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator> level_halo_layer;
-        std::vector<bool> locally_active_vertices_on_level_subdomain (tria.n_vertices(), false);
-
-        // Find the cells for which the predicate is true
-        // These are the cells around which we wish to construct
-        // the halo layer
-        for (typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
-             cell = tria.begin(level);
-             cell != tria.end(level); ++cell)
-          if (predicate(cell)) // True predicate -> part of subdomain
-            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
-              locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] = true;
-
-        // Find the cells that do not conform to the predicate
-        // but share a vertex with the selected level subdomain
-        // These comprise the halo layer
-        for (typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
-             cell = tria.begin(level);
-             cell != tria.end(level); ++cell)
-          if (!predicate(cell)) // False predicate -> possible halo layer cell
-            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
-              if (locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] == true)
-                {
-                  level_halo_layer.push_back(cell);
-                  break;
-                }
-
-        return level_halo_layer;
-      }
-    }
-
     template <int dim, int spacedim>
     void Triangulation<dim,spacedim>::partition()
     {
@@ -114,7 +70,8 @@ namespace parallel
           AssertThrow(false, ExcInternalError())
         }
 
-      // custom partition require manual partitioning of level cells
+      // do not partition multigrid levels if the user is
+      // defining a custom partition
       if ((settings & construct_multigrid_hierarchy) && !(settings & partition_custom_signal))
         dealii::GridTools::partition_multigrid_levels(*this);
 
@@ -155,7 +112,7 @@ namespace parallel
           for (unsigned int lvl=0; lvl<this->n_levels(); ++lvl)
             {
              const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
-             level_halo_layer_vector = compute_cell_halo_layer_on_level (*this, predicate, lvl);
+             level_halo_layer_vector = GridTools::compute_cell_halo_layer_on_level (*this, predicate, lvl);
              std::set<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
              level_halo_layer(level_halo_layer_vector.begin(), level_halo_layer_vector.end());
 
@@ -164,10 +121,11 @@ namespace parallel
              endc = this->end(lvl);
              for (; cell != endc; cell++)
                {
-                 // for active cells we must keep level subdomain id of all neighbors,
-                 // not just neighbors that exist on the same level.
+                 // for active cells we must keep level subdomain id of all neighbors
+                 // to our subdomain, not just cells that share a vertex on the same level.
                  // if the cells subdomain id was not set to artitficial above, we will
-                 // also keep its level subdomain id.
+                 // also keep its level subdomain id since it is either owned by this processor
+                 // or in the ghost layer of the active mesh.
                  if (!cell->has_children() && cell->subdomain_id() != numbers::artificial_subdomain_id)
                    continue;
                  if (!cell->is_locally_owned_on_level() &&
diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc
index f4989746a8..373b279613 100644
--- a/source/grid/grid_tools.cc
+++ b/source/grid/grid_tools.cc
@@ -1607,6 +1607,46 @@ next_cell:
 
 
 
+  template <class MeshType>
+  std::vector<typename MeshType::cell_iterator>
+  compute_cell_halo_layer_on_level
+  (const MeshType &mesh,
+   const std_cxx11::function<bool (const typename MeshType::cell_iterator &)> &predicate,
+   const unsigned int level)
+  {
+    std::vector<typename MeshType::cell_iterator> level_halo_layer;
+    std::vector<bool> locally_active_vertices_on_level_subdomain (mesh.get_triangulation().n_vertices(),
+                                                                  false);
+
+    // Find the cells for which the predicate is true
+    // These are the cells around which we wish to construct
+    // the halo layer
+    for (typename MeshType::cell_iterator
+         cell = mesh.begin(level);
+         cell != mesh.end(level); ++cell)
+      if (predicate(cell)) // True predicate --> Part of subdomain
+        for (unsigned int v=0; v<GeometryInfo<MeshType::dimension>::vertices_per_cell; ++v)
+          locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] = true;
+
+    // Find the cells that do not conform to the predicate
+    // but share a vertex with the selected subdomain on that level
+    // These comprise the halo layer
+    for (typename MeshType::cell_iterator
+         cell = mesh.begin(level);
+         cell != mesh.end(level); ++cell)
+      if (!predicate(cell)) // False predicate --> Potential halo cell
+        for (unsigned int v=0; v<GeometryInfo<MeshType::dimension>::vertices_per_cell; ++v)
+          if (locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] == true)
+            {
+              level_halo_layer.push_back(cell);
+              break;
+            }
+
+    return level_halo_layer;
+  }
+
+
+
   template <class MeshType>
   std::vector<typename MeshType::active_cell_iterator>
   compute_ghost_cell_halo_layer (const MeshType &mesh)
diff --git a/source/grid/grid_tools.inst.in b/source/grid/grid_tools.inst.in
index 8bc7c7cf35..b8fbeba429 100644
--- a/source/grid/grid_tools.inst.in
+++ b/source/grid/grid_tools.inst.in
@@ -45,6 +45,12 @@ for (X : TRIANGULATION_AND_DOFHANDLERS; deal_II_dimension : DIMENSIONS ; deal_II
   compute_active_cell_halo_layer (const X &,
                                   const std_cxx11::function<bool (const dealii::internal::ActiveCellIterator<deal_II_dimension, deal_II_space_dimension, X>::type&)> &);
 
+  template
+  std::vector<X::cell_iterator>
+  compute_cell_halo_layer_on_level (const X &,
+                                    const std_cxx11::function<bool (const X::cell_iterator &)> &,
+                                    const unsigned int);
+
   template
   std::vector<dealii::internal::ActiveCellIterator<deal_II_dimension, deal_II_space_dimension, X>::type>
   compute_ghost_cell_halo_layer (const X &);
diff --git a/tests/sharedtria/tria_multigrid_02.cc b/tests/sharedtria/tria_multigrid_02.cc
index 0e92e9d2aa..de3a0ae242 100644
--- a/tests/sharedtria/tria_multigrid_02.cc
+++ b/tests/sharedtria/tria_multigrid_02.cc
@@ -53,10 +53,13 @@ void test()
     if (cell->center().norm() > 0.3 && cell->center().norm() < 0.42)
       cell->set_refine_flag();
   shared_tria.execute_coarsening_and_refinement();
-  for (typename Triangulation<dim>::active_cell_iterator cell=shared_tria.begin_active(); cell != shared_tria.end(); ++cell)
-    if (cell->at_boundary() && (cell->center()[0] < 0 || cell->center()[1] < 0))
-      cell->set_refine_flag();
-  shared_tria.execute_coarsening_and_refinement();
+  if (dim != 1)
+    {
+      for (typename Triangulation<dim>::active_cell_iterator cell=shared_tria.begin_active(); cell != shared_tria.end(); ++cell)
+        if (cell->at_boundary() && (cell->center()[0] < 0 || cell->center()[1] < 0))
+          cell->set_refine_flag();
+      shared_tria.execute_coarsening_and_refinement();
+    }
 
   deallog << "(CellId,level_subdomain_id) for each active cell:" << std::endl;
   for (unsigned int lvl=0; lvl<shared_tria.n_levels(); ++lvl)
@@ -75,6 +78,9 @@ int main(int argc, char *argv[])
   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
   MPILogInitAll all;
 
+  deallog.push("1d");
+  test<1>();
+  deallog.pop();
   deallog.push("2d");
   test<2>();
   deallog.pop();
diff --git a/tests/sharedtria/tria_multigrid_02.mpirun=3.output b/tests/sharedtria/tria_multigrid_02.mpirun=3.output
index aa5f75a3ab..06ad6d3168 100644
--- a/tests/sharedtria/tria_multigrid_02.mpirun=3.output
+++ b/tests/sharedtria/tria_multigrid_02.mpirun=3.output
@@ -1,4 +1,19 @@
 
+DEAL:0:1d::(CellId,level_subdomain_id) for each active cell:
+DEAL:0:1d::(0_0:,0)
+DEAL:0:1d::(1_0:,1)
+DEAL:0:1d::(0_1:0,0)
+DEAL:0:1d::(0_1:1,0)
+DEAL:0:1d::(1_1:0,1)
+DEAL:0:1d::(0_2:00,0)
+DEAL:0:1d::(0_2:01,0)
+DEAL:0:1d::(0_2:10,0)
+DEAL:0:1d::(0_2:11,1)
+DEAL:0:1d::(0_3:100,0)
+DEAL:0:1d::(0_3:101,0)
+DEAL:0:1d::(0_3:110,1)
+DEAL:0:1d::(0_4:1010,0)
+DEAL:0:1d::(0_4:1011,0)
 DEAL:0:2d::(CellId,level_subdomain_id) for each active cell:
 DEAL:0:2d::(0_0:,0)
 DEAL:0:2d::(1_0:,0)
@@ -1756,6 +1771,24 @@ DEAL:0:3d::(7_4:0100,2)
 DEAL:0:3d::(7_4:0101,2)
 DEAL:0::OK
 
+DEAL:1:1d::(CellId,level_subdomain_id) for each active cell:
+DEAL:1:1d::(0_0:,0)
+DEAL:1:1d::(1_0:,1)
+DEAL:1:1d::(0_1:1,0)
+DEAL:1:1d::(1_1:0,1)
+DEAL:1:1d::(1_1:1,2)
+DEAL:1:1d::(0_2:10,0)
+DEAL:1:1d::(0_2:11,1)
+DEAL:1:1d::(1_2:00,1)
+DEAL:1:1d::(1_2:01,2)
+DEAL:1:1d::(0_3:101,0)
+DEAL:1:1d::(0_3:110,1)
+DEAL:1:1d::(0_3:111,1)
+DEAL:1:1d::(1_3:000,1)
+DEAL:1:1d::(1_3:001,1)
+DEAL:1:1d::(1_3:010,2)
+DEAL:1:1d::(0_4:1011,0)
+DEAL:1:1d::(1_4:0100,2)
 DEAL:1:2d::(CellId,level_subdomain_id) for each active cell:
 DEAL:1:2d::(0_0:,0)
 DEAL:1:2d::(1_0:,0)
@@ -3829,6 +3862,18 @@ DEAL:1:3d::(4_4:3737,1)
 DEAL:1::OK
 
 
+DEAL:2:1d::(CellId,level_subdomain_id) for each active cell:
+DEAL:2:1d::(1_1:0,1)
+DEAL:2:1d::(1_1:1,2)
+DEAL:2:1d::(1_2:00,1)
+DEAL:2:1d::(1_2:01,2)
+DEAL:2:1d::(1_2:10,2)
+DEAL:2:1d::(1_2:11,2)
+DEAL:2:1d::(1_3:001,1)
+DEAL:2:1d::(1_3:010,2)
+DEAL:2:1d::(1_3:011,2)
+DEAL:2:1d::(1_4:0100,2)
+DEAL:2:1d::(1_4:0101,2)
 DEAL:2:2d::(CellId,level_subdomain_id) for each active cell:
 DEAL:2:2d::(0_0:,0)
 DEAL:2:2d::(1_0:,0)
diff --git a/tests/sharedtria/tria_zorder_02.cc b/tests/sharedtria/tria_zorder_02.cc
index 7af1191c7a..919d6645bb 100644
--- a/tests/sharedtria/tria_zorder_02.cc
+++ b/tests/sharedtria/tria_zorder_02.cc
@@ -52,10 +52,13 @@ void test()
     if (cell->center().norm() > 0.3 && cell->center().norm() < 0.42)
       cell->set_refine_flag();
   shared_tria.execute_coarsening_and_refinement();
-  for (typename Triangulation<dim>::active_cell_iterator cell=shared_tria.begin_active(); cell != shared_tria.end(); ++cell)
-    if (cell->at_boundary() && (cell->center()[0] < 0 || cell->center()[1] < 0))
-      cell->set_refine_flag();
-  shared_tria.execute_coarsening_and_refinement();
+  if (dim != 1)
+    {
+      for (typename Triangulation<dim>::active_cell_iterator cell=shared_tria.begin_active(); cell != shared_tria.end(); ++cell)
+        if (cell->at_boundary() && (cell->center()[0] < 0 || cell->center()[1] < 0))
+          cell->set_refine_flag();
+      shared_tria.execute_coarsening_and_refinement();
+    }
 
   deallog << "(CellId,subdomain_id) for each active cell:" << std::endl;
   typename Triangulation<dim>::active_cell_iterator
@@ -71,6 +74,9 @@ int main(int argc, char *argv[])
   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
   MPILogInitAll all;
 
+  deallog.push("1d");
+  test<1>();
+  deallog.pop();
   deallog.push("2d");
   test<2>();
   deallog.pop();
diff --git a/tests/sharedtria/tria_zorder_02.mpirun=3.output b/tests/sharedtria/tria_zorder_02.mpirun=3.output
index bf66b82b53..4e9ad0d465 100644
--- a/tests/sharedtria/tria_zorder_02.mpirun=3.output
+++ b/tests/sharedtria/tria_zorder_02.mpirun=3.output
@@ -1,4 +1,10 @@
 
+DEAL:0:1d::(CellId,subdomain_id) for each active cell:
+DEAL:0:1d::(0_2:00,0)
+DEAL:0:1d::(0_2:01,0)
+DEAL:0:1d::(0_3:100,0)
+DEAL:0:1d::(0_4:1010,0)
+DEAL:0:1d::(0_4:1011,0)
 DEAL:0:2d::(CellId,subdomain_id) for each active cell:
 DEAL:0:2d::(0_2:03,0)
 DEAL:0:2d::(0_2:12,0)
@@ -1161,6 +1167,11 @@ DEAL:0:3d::(1_4:6766,0)
 DEAL:0:3d::(1_4:6767,0)
 DEAL:0::OK
 
+DEAL:1:1d::(CellId,subdomain_id) for each active cell:
+DEAL:1:1d::(0_3:110,1)
+DEAL:1:1d::(0_3:111,1)
+DEAL:1:1d::(1_3:000,1)
+DEAL:1:1d::(1_3:001,1)
 DEAL:1:2d::(CellId,subdomain_id) for each active cell:
 DEAL:1:2d::(1_2:12,1)
 DEAL:1:2d::(1_2:30,1)
@@ -2318,6 +2329,12 @@ DEAL:1:3d::(4_4:3737,1)
 DEAL:1::OK
 
 
+DEAL:2:1d::(CellId,subdomain_id) for each active cell:
+DEAL:2:1d::(1_2:10,2)
+DEAL:2:1d::(1_2:11,2)
+DEAL:2:1d::(1_3:011,2)
+DEAL:2:1d::(1_4:0100,2)
+DEAL:2:1d::(1_4:0101,2)
 DEAL:2:2d::(CellId,subdomain_id) for each active cell:
 DEAL:2:2d::(2_2:21,2)
 DEAL:2:2d::(2_2:30,2)
-- 
2.39.5
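A minimal usage sketch of the GridTools::compute_cell_halo_layer_on_level() function introduced by this patch follows; it is illustrative only and not part of the patch. It assumes a Triangulation<2> whose level_subdomain_id values have already been assigned (for example, a parallel::shared::Triangulation built with the construct_multigrid_hierarchy setting) and a deal.II build where std_cxx11::function accepts a C++11 lambda. The helper name print_level_halo_sizes and the my_rank parameter are hypothetical; the function name and signature are the ones declared in grid_tools.h above.

  // Usage sketch (assumed helper, not part of the patch): on each level of the
  // mesh, gather the halo layer around the cells whose level_subdomain_id
  // equals my_rank and report how many cells it contains.
  #include <deal.II/base/std_cxx11/function.h>
  #include <deal.II/grid/grid_tools.h>
  #include <deal.II/grid/tria.h>

  #include <iostream>
  #include <vector>

  using namespace dealii;

  void print_level_halo_sizes (const Triangulation<2>   &tria,
                               const types::subdomain_id my_rank)
  {
    typedef Triangulation<2>::cell_iterator cell_iterator;

    // The predicate defines the "subdomain" on each level: here, the cells
    // whose level owner is my_rank. This is only meaningful if level
    // subdomain ids have been set, e.g. by a parallel::shared::Triangulation
    // constructed with construct_multigrid_hierarchy.
    const std_cxx11::function<bool (const cell_iterator &)> predicate
      = [my_rank] (const cell_iterator &cell)
    {
      return cell->level_subdomain_id() == my_rank;
    };

    for (unsigned int lvl = 0; lvl < tria.n_levels(); ++lvl)
      {
        const std::vector<cell_iterator> halo
          = GridTools::compute_cell_halo_layer_on_level (tria, predicate, lvl);

        std::cout << "level " << lvl << ": "
                  << halo.size() << " halo cells" << std::endl;
      }
  }

This mirrors the way partition() in source/distributed/shared_tria.cc drives the function, where the returned iterators are then used to keep the level subdomain ids of the cells surrounding each level subdomain.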