From: tcclevenger
Date: Thu, 16 Mar 2017 00:10:40 +0000 (-0400)
Subject: requested changes, round 1
X-Git-Tag: v9.0.0-rc1~1769^2~3
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c13e6d8ae68f6eea41ae6f089278c5acc3df58a1;p=dealii.git

requested changes, round 1
---

diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h
index 493dc543ae..c420786c92 100644
--- a/include/deal.II/distributed/shared_tria.h
+++ b/include/deal.II/distributed/shared_tria.h
@@ -107,17 +107,21 @@ namespace parallel
        * {
        *   parallel::shared::Triangulation<dim> tria(...,
        *                                             parallel::shared::Triangulation<dim>::Settings::partition_custom_signal);
-       *   shared_tria.signals.post_refinement.connect (std::bind(&mypartition, std::ref(shared_tria)));
+       *   tria.signals.post_refinement.connect (std::bind(&mypartition, std::ref(tria)));
        * }
        * @endcode
+       *
+       * Note: If you plan to use a custom partition with geometric multigrid,
+       * you must manually partition the level cells in addition to the active cells.
        */
       partition_custom_signal = 0x4,

       /**
-       * This flags needs to be set to use the geometric multigrid
-       * functionality. This option requires additional computation and
-       * communication. Note: geometric multigrid is still a work in
-       * progress and not yet functional for shared triangulation.
+       * This flag needs to be set to use the geometric multigrid
+       * functionality. This option requires additional computation and communication.
+       *
+       * Note: This flag should always be set alongside a flag for an
+       * active cell partitioning method.
        */
       construct_multigrid_hierarchy = 0x8,
     };
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
index 1e928ed7cf..fba438e439 100644
--- a/source/distributed/shared_tria.cc
+++ b/source/distributed/shared_tria.cc
@@ -45,30 +45,78 @@ namespace parallel
       Assert((settings & (partition_metis | partition_zorder | partition_custom_signal)) == partition_metis ||
              (settings & (partition_metis | partition_zorder | partition_custom_signal)) == partition_zorder ||
              (settings & (partition_metis | partition_zorder | partition_custom_signal)) == partition_custom_signal,
-             ExcMessage ("Settings must contain only one type of active cell partitioning scheme."))
+             ExcMessage ("Settings must contain exactly one type of active cell partitioning scheme."))
     }

+    namespace
+    {
+      /**
+       * Helper function for partition() which determines halo
+       * layer cells for a given level.
+       */
+      template <int dim, int spacedim>
+      std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
+      compute_cell_halo_layer_on_level
+      (parallel::shared::Triangulation<dim,spacedim> &tria,
+       const std_cxx11::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator &)> &predicate,
+       const unsigned int level)
+      {
+        std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator> level_halo_layer;
+        std::vector<bool> locally_active_vertices_on_level_subdomain (tria.n_vertices(), false);
+
+        // Find the cells for which the predicate is true.
+        // These are the cells around which we wish to construct
+        // the halo layer.
+        for (typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
+             cell = tria.begin(level);
+             cell != tria.end(level); ++cell)
+          if (predicate(cell)) // True predicate -> part of subdomain
+            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+              locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] = true;
+
+        // Find the cells that do not conform to the predicate
+        // but share a vertex with the selected level subdomain.
+        // These comprise the halo layer.
+        for (typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
+             cell = tria.begin(level);
+             cell != tria.end(level); ++cell)
+          if (!predicate(cell)) // False predicate -> possible halo layer cell
+            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+              if (locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] == true)
+                {
+                  level_halo_layer.push_back(cell);
+                  break;
+                }
+
+        return level_halo_layer;
+      }
+    }
+
     template <int dim, int spacedim>
     void Triangulation<dim,spacedim>::partition()
     {
       if (settings & partition_metis)
         {
           dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
-          if (settings & construct_multigrid_hierarchy)
-            dealii::GridTools::partition_multigrid_levels(*this);
         }
       else if (settings & partition_zorder)
         {
           dealii::GridTools::partition_triangulation_zorder (this->n_subdomains, *this);
-          if (settings & construct_multigrid_hierarchy)
-            dealii::GridTools::partition_multigrid_levels(*this);
         }
-      else
+      else if (settings & partition_custom_signal)
         {
           // User partitions mesh manually
         }
+      else
+        {
+          AssertThrow(false, ExcInternalError())
+        }
+
+      // custom partitions require manual partitioning of level cells
+      if ((settings & construct_multigrid_hierarchy) && !(settings & partition_custom_signal))
+        dealii::GridTools::partition_multigrid_levels(*this);

       true_subdomain_ids_of_cells.resize(this->n_active_cells());

@@ -98,6 +146,36 @@ namespace parallel
                   active_halo_layer.find(cell) == active_halo_layer.end())
                 cell->set_subdomain_id(numbers::artificial_subdomain_id);
             }
+
+          // loop over all cells in multigrid hierarchy and mark artificial:
+          if (settings & construct_multigrid_hierarchy)
+            {
+              std_cxx11::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator &)> predicate
+                = IteratorFilters::LocallyOwnedLevelCell();
+              for (unsigned int lvl=0; lvl<this->n_levels(); ++lvl)
+                {
+                  const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
+                  level_halo_layer_vector = compute_cell_halo_layer_on_level (*this, predicate, lvl);
+                  std::set<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
+                  level_halo_layer(level_halo_layer_vector.begin(), level_halo_layer_vector.end());
+
+                  typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
+                  cell = this->begin(lvl),
+                  endc = this->end(lvl);
+                  for (; cell != endc; cell++)
+                    {
+                      // for active cells we must keep the level subdomain id of all neighbors,
+                      // not just neighbors that exist on the same level.
+                      // if the cell's subdomain id was not set to artificial above, we will
+                      // also keep its level subdomain id.
+                      if (!cell->has_children() && cell->subdomain_id() != numbers::artificial_subdomain_id)
+                        continue;
+                      if (!cell->is_locally_owned_on_level() &&
+                          level_halo_layer.find(cell) == level_halo_layer.end())
+                        cell->set_level_subdomain_id(numbers::artificial_subdomain_id);
+                    }
+                }
+            }
         }
       else
         {
@@ -106,34 +184,6 @@ namespace parallel
               true_subdomain_ids_of_cells[index] = cell->subdomain_id();
           }

-      // loop over all cells in multigrid hierarchy and mark artificial:
-      if (allow_artificial_cells && (settings & construct_multigrid_hierarchy))
-        {
-          for (unsigned int lvl=0; lvl<this->n_levels(); ++lvl)
-            {
-              const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
-              level_halo_layer_vector = GridTools::compute_cell_halo_layer_on_level (*this, this->my_subdomain, lvl);
-              std::set<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
-              level_halo_layer(level_halo_layer_vector.begin(), level_halo_layer_vector.end());
-
-              typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
-              cell = this->begin(lvl),
-              endc = this->end(lvl);
-              for (; cell != endc; cell++)
-                {
-                  // for active cells we must keep level subdomain id of all neighbors,
-                  // not just neighbors that exist on the same level.
-                  // if the cells subdomain id was not set to artitficial above, we will
-                  // also keep its level subdomain id.
-                  if (!cell->has_children() && cell->subdomain_id() != numbers::artificial_subdomain_id)
-                    continue;
-                  if (cell->level_subdomain_id() != this->my_subdomain &&
-                      level_halo_layer.find(cell) == level_halo_layer.end())
-                    cell->set_level_subdomain_id(numbers::artificial_subdomain_id);
-                }
-            }
-        }
-
 #ifdef DEBUG
       {
         // Assert that each cell is owned by a processor
@@ -153,30 +203,27 @@ namespace parallel
         Assert(total_cells == this->n_active_cells(),
                ExcMessage("Not all cells are assigned to a processor."))
       }
-#endif

+      // If running with multigrid, assert that each level
+      // cell is owned by a processor
       if (settings & construct_multigrid_hierarchy)
         {
-#ifdef DEBUG
-          {
-            // Assert that each level cell is owned by a processor if running with multigrid
-            unsigned int n_my_cells = 0;
-            typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
-            cell = this->begin(),
-            endc = this->end();
-            for (; cell!=endc; ++cell)
-              if (cell->is_locally_owned_on_level())
-                n_my_cells += 1;
-
-            unsigned int total_cells;
-            int ierr = MPI_Allreduce (&n_my_cells, &total_cells, 1, MPI_UNSIGNED, MPI_SUM, this->get_communicator());
-            AssertThrowMPI(ierr);
-
-            Assert(total_cells == this->n_cells(),
-                   ExcMessage("Not all cells are assigned to a processor."))
-          }
-#endif
+          unsigned int n_my_cells = 0;
+          typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
+          cell = this->begin(),
+          endc = this->end();
+          for (; cell!=endc; ++cell)
+            if (cell->is_locally_owned_on_level())
+              n_my_cells += 1;
+
+          unsigned int total_cells;
+          int ierr = MPI_Allreduce (&n_my_cells, &total_cells, 1, MPI_UNSIGNED, MPI_SUM, this->get_communicator());
+          AssertThrowMPI(ierr);
+
+          Assert(total_cells == this->n_cells(),
+                 ExcMessage("Not all cells are assigned to a processor."))
         }
+#endif
     }
diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc
index 969fe951d0..f4989746a8 100644
--- a/source/grid/grid_tools.cc
+++ b/source/grid/grid_tools.cc
@@ -1606,45 +1606,6 @@ next_cell:
   }


-  template <class MeshType>
-  std::vector<typename MeshType::cell_iterator>
-  compute_cell_halo_layer_on_level
-  (const MeshType &mesh,
-   const unsigned int my_level_subdomain_id,
-   const unsigned int level)
-  {
-    std::vector<typename MeshType::cell_iterator> level_halo_layer;
-    std::vector<bool> locally_active_vertices_on_level_subdomain (mesh.get_triangulation().n_vertices(),
-                                                                  false);
-
-    // Find the cells which are locally owned.
-    // These are the cells around which we wish to construct
-    // the halo layer
-    for (typename MeshType::cell_iterator
-         cell = mesh.begin(level);
-         cell != mesh.end(level); ++cell)
-      if (cell->level_subdomain_id() == my_level_subdomain_id)
-        for (unsigned int v=0; v<GeometryInfo<MeshType::dimension>::vertices_per_cell; ++v)
-          locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] = true;
-
-    // Find the cells which are not locally owned
-    // but share a vertex with the selected level subdomain
-    // These comprise the halo layer
-    for (typename MeshType::cell_iterator
-         cell = mesh.begin(level);
-         cell != mesh.end(level); ++cell)
-      if (cell->level_subdomain_id() != my_level_subdomain_id) //False -> possible halo layer cell
-        for (unsigned int v=0; v<GeometryInfo<MeshType::dimension>::vertices_per_cell; ++v)
-          if (locally_active_vertices_on_level_subdomain[cell->vertex_index(v)] == true)
-            {
-              level_halo_layer.push_back(cell);
-              break;
-            }
-
-    return level_halo_layer;
-  }
-
-
   template <class MeshType>
   std::vector<typename MeshType::active_cell_iterator>
@@ -1668,7 +1629,6 @@ next_cell:

-
   template <int dim, int spacedim>
   std::vector<std::set<typename Triangulation<dim,spacedim>::active_cell_iterator> >
   vertex_to_cell_map(const Triangulation<dim,spacedim> &triangulation)
@@ -2197,15 +2157,15 @@ next_cell:
    * recursive helper function for partition_triangulation_zorder
    */
   template <class IT>
-  void set_subdomain_id_in_zorder_recursively(IT cell,
-                                              unsigned int &current_proc_idx,
-                                              unsigned int &current_cell_idx,
-                                              const unsigned int n_active_cellls,
+  void set_subdomain_id_in_zorder_recursively(IT cell,
+                                              unsigned int &current_proc_idx,
+                                              unsigned int &current_cell_idx,
+                                              const unsigned int n_active_cells,
                                               const unsigned int n_partitions)
   {
     if (cell->active())
       {
-        while (current_cell_idx >= floor((long)n_active_cellls*(current_proc_idx+1)/n_partitions))
+        while (current_cell_idx >= floor((long)n_active_cells*(current_proc_idx+1)/n_partitions))
           ++current_proc_idx;
         cell->set_subdomain_id(current_proc_idx);
         ++current_cell_idx;
@@ -2216,7 +2176,7 @@ next_cell:
           set_subdomain_id_in_zorder_recursively(cell->child(n),
                                                  current_proc_idx,
                                                  current_cell_idx,
-                                                 n_active_cellls,
+                                                 n_active_cells,
                                                  n_partitions);
       }
   }
@@ -2245,14 +2205,8 @@ next_cell:
         return;
       }

-    typedef typename Triangulation<dim,spacedim>::active_cell_iterator ac_it;
-    typedef typename Triangulation<dim,spacedim>::cell_iterator lvl_it;
-
-    unsigned int current_proc_idx=0;
-    unsigned int current_cell_idx=0;
-    const unsigned int n_active_cellls = triangulation.n_active_cells();
-
-    // create coarse cell reordering
+    // Duplicate the coarse cell reordering
+    // as done in p4est
     std::vector<types::global_dof_index> coarse_cell_to_p4est_tree_permutation;
     std::vector<types::global_dof_index> p4est_tree_to_coarse_cell_permutation;

@@ -2265,21 +2219,32 @@ next_cell:
     p4est_tree_to_coarse_cell_permutation
       = Utilities::invert_permutation (coarse_cell_to_p4est_tree_permutation);

+    unsigned int current_proc_idx=0;
+    unsigned int current_cell_idx=0;
+    const unsigned int n_active_cells = triangulation.n_active_cells();
+
     // set subdomain id for active cell descendants
     // of each coarse cell in permuted order
     for (unsigned int idx=0; idx<triangulation.n_cells(0); ++idx)
       {
+        const unsigned int coarse_cell_idx = p4est_tree_to_coarse_cell_permutation[idx];
+        typename Triangulation<dim,spacedim>::cell_iterator
+        coarse_cell (&triangulation, 0, coarse_cell_idx);
+
         set_subdomain_id_in_zorder_recursively(coarse_cell,
                                                current_proc_idx,
                                                current_cell_idx,
-                                               n_active_cellls,
+                                               n_active_cells,
                                                n_partitions);
       }

-    // ensure terminal siblings are on the same
-    // processor as in p4est
+    // if all children of a cell are active (e.g. we
+    // have a cell that is refined once and no part
+    // is refined further), p4est places all of them
+    // on the same processor. The new owner will be
+    // the processor with the largest number of children
+    // (ties are broken by picking the lower rank).
+    // Duplicate this logic here.
     {
       typename Triangulation<dim,spacedim>::cell_iterator
       cell = triangulation.begin(),
       endc = triangulation.end();
@@ -2321,11 +2286,11 @@ next_cell:
   partition_multigrid_levels (Triangulation<dim,spacedim> &triangulation)
   {
     unsigned int n_levels = triangulation.n_levels();
-    for (int lvl=n_levels-1; lvl>=0; --lvl)
+    for (int lvl = n_levels-1; lvl>=0; --lvl)
       {
         typename Triangulation<dim,spacedim>::cell_iterator
         cell = triangulation.begin(lvl),
-        endc=triangulation.end(lvl);
+        endc = triangulation.end(lvl);
         for (; cell!=endc; ++cell)
           {
             if (!cell->has_children())
diff --git a/source/grid/grid_tools.inst.in b/source/grid/grid_tools.inst.in
index d516df1c73..8bc7c7cf35 100644
--- a/source/grid/grid_tools.inst.in
+++ b/source/grid/grid_tools.inst.in
@@ -45,12 +45,6 @@ for (X : TRIANGULATION_AND_DOFHANDLERS; deal_II_dimension : DIMENSIONS ; deal_II
     compute_active_cell_halo_layer (const X &,
                                     const std_cxx11::function<bool (const dealii::internal::ActiveCellIterator<deal_II_dimension, deal_II_space_dimension, X>::type&)> &);

-    template
-    std::vector<X::cell_iterator>
-    compute_cell_halo_layer_on_level (const X &,
-                                      const unsigned int,
-                                      const unsigned int);
-
     template
     std::vector<dealii::internal::ActiveCellIterator<deal_II_dimension, deal_II_space_dimension, X>::type>
     compute_ghost_cell_halo_layer (const X &);
diff --git a/tests/sharedtria/tria_custom_refine.cc b/tests/sharedtria/tria_custom_refine.cc
index 9ce08addc3..7422ee3bd6 100644
--- a/tests/sharedtria/tria_custom_refine.cc
+++ b/tests/sharedtria/tria_custom_refine.cc
@@ -72,7 +72,7 @@ void test()
   cell = shared_tria.begin_active(),
   endc = shared_tria.end();
   for (; cell!=endc; ++cell)
-    if (cell->is_locally_owned())
+    if (cell->subdomain_id() != numbers::artificial_subdomain_id)
       deallog << "(" << cell->id().to_string() << "," << cell->subdomain_id() << ")" << std::endl;
 }
diff --git a/tests/sharedtria/tria_custom_refine.mpirun=3.output b/tests/sharedtria/tria_custom_refine.mpirun=3.output
index f4fa947587..2ae81a1f79 100644
--- a/tests/sharedtria/tria_custom_refine.mpirun=3.output
+++ b/tests/sharedtria/tria_custom_refine.mpirun=3.output
@@ -1,5 +1,10 @@
 DEAL:0:2d::(CellId,subdomain_id) for each active cell:
+DEAL:0:2d::(0_1:1,1)
+DEAL:0:2d::(0_1:2,1)
+DEAL:0:2d::(0_1:3,1)
+DEAL:0:2d::(1_1:2,2)
+DEAL:0:2d::(2_1:0,2)
 DEAL:0:2d::(2_1:1,0)
 DEAL:0:2d::(2_1:2,0)
 DEAL:0:2d::(2_1:3,0)
@@ -8,6 +13,14 @@
 DEAL:0:2d::(0_2:01,0)
 DEAL:0:2d::(0_2:02,0)
 DEAL:0:2d::(0_2:03,0)
 DEAL:0:3d::(CellId,subdomain_id) for each active cell:
+DEAL:0:3d::(0_1:1,1)
+DEAL:0:3d::(0_1:2,1)
+DEAL:0:3d::(0_1:3,1)
+DEAL:0:3d::(0_1:4,1)
+DEAL:0:3d::(0_1:5,2)
+DEAL:0:3d::(0_1:6,2)
+DEAL:0:3d::(0_1:7,2)
+DEAL:0:3d::(1_1:0,2)
 DEAL:0:3d::(1_1:1,0)
 DEAL:0:3d::(1_1:2,0)
 DEAL:0:3d::(1_1:3,0)
@@ -70,23 +83,90 @@
 DEAL:1:2d::(0_1:1,1)
 DEAL:1:2d::(0_1:2,1)
 DEAL:1:2d::(0_1:3,1)
 DEAL:1:2d::(1_1:0,1)
+DEAL:1:2d::(1_1:1,2)
+DEAL:1:2d::(1_1:2,2)
+DEAL:1:2d::(1_1:3,2)
+DEAL:1:2d::(2_1:0,2)
+DEAL:1:2d::(2_1:1,0)
+DEAL:1:2d::(0_2:01,0)
+DEAL:1:2d::(0_2:02,0)
+DEAL:1:2d::(0_2:03,0)
 DEAL:1:3d::(CellId,subdomain_id) for each active cell:
 DEAL:1:3d::(0_1:1,1)
 DEAL:1:3d::(0_1:2,1)
 DEAL:1:3d::(0_1:3,1)
 DEAL:1:3d::(0_1:4,1)
+DEAL:1:3d::(0_1:5,2)
+DEAL:1:3d::(0_1:6,2)
+DEAL:1:3d::(0_1:7,2)
+DEAL:1:3d::(1_1:0,2)
+DEAL:1:3d::(1_1:2,0)
+DEAL:1:3d::(1_1:4,0)
+DEAL:1:3d::(1_1:6,0)
+DEAL:1:3d::(2_1:0,0)
+DEAL:1:3d::(2_1:1,0)
+DEAL:1:3d::(2_1:2,0)
+DEAL:1:3d::(2_1:3,0)
+DEAL:1:3d::(4_1:0,0)
+DEAL:1:3d::(4_1:1,0)
+DEAL:1:3d::(4_1:4,0)
+DEAL:1:3d::(4_1:5,0)
+DEAL:1:3d::(5_1:0,0)
+DEAL:1:3d::(5_1:4,0)
+DEAL:1:3d::(0_2:01,0)
+DEAL:1:3d::(0_2:02,0)
+DEAL:1:3d::(0_2:03,0)
+DEAL:1:3d::(0_2:04,0)
+DEAL:1:3d::(0_2:05,0)
+DEAL:1:3d::(0_2:06,0)
+DEAL:1:3d::(0_2:07,0)
 DEAL:1::OK
 DEAL:2:2d::(CellId,subdomain_id) for each active cell:
+DEAL:2:2d::(0_1:1,1)
+DEAL:2:2d::(0_1:2,1)
+DEAL:2:2d::(0_1:3,1)
+DEAL:2:2d::(1_1:0,1)
 DEAL:2:2d::(1_1:1,2)
 DEAL:2:2d::(1_1:2,2)
 DEAL:2:2d::(1_1:3,2)
 DEAL:2:2d::(2_1:0,2)
+DEAL:2:2d::(2_1:1,0)
+DEAL:2:2d::(2_1:2,0)
+DEAL:2:2d::(2_1:3,0)
 DEAL:2:3d::(CellId,subdomain_id) for each active cell:
+DEAL:2:3d::(0_1:1,1)
+DEAL:2:3d::(0_1:2,1)
+DEAL:2:3d::(0_1:3,1)
+DEAL:2:3d::(0_1:4,1)
 DEAL:2:3d::(0_1:5,2)
 DEAL:2:3d::(0_1:6,2)
 DEAL:2:3d::(0_1:7,2)
 DEAL:2:3d::(1_1:0,2)
+DEAL:2:3d::(1_1:1,0)
+DEAL:2:3d::(1_1:2,0)
+DEAL:2:3d::(1_1:3,0)
+DEAL:2:3d::(1_1:4,0)
+DEAL:2:3d::(1_1:5,0)
+DEAL:2:3d::(1_1:6,0)
+DEAL:2:3d::(1_1:7,0)
+DEAL:2:3d::(2_1:0,0)
+DEAL:2:3d::(2_1:1,0)
+DEAL:2:3d::(2_1:2,0)
+DEAL:2:3d::(2_1:3,0)
+DEAL:2:3d::(3_1:0,0)
+DEAL:2:3d::(3_1:2,0)
+DEAL:2:3d::(4_1:0,0)
+DEAL:2:3d::(4_1:1,0)
+DEAL:2:3d::(4_1:4,0)
+DEAL:2:3d::(4_1:5,0)
+DEAL:2:3d::(5_1:0,0)
+DEAL:2:3d::(5_1:4,0)
+DEAL:2:3d::(6_1:0,0)
+DEAL:2:3d::(6_1:1,0)
+DEAL:2:3d::(0_2:05,0)
+DEAL:2:3d::(0_2:06,0)
+DEAL:2:3d::(0_2:07,0)
 DEAL:2::OK
diff --git a/tests/sharedtria/tria_multigrid_01.cc b/tests/sharedtria/tria_multigrid_01.cc
index fe9e65111b..763269cf23 100644
--- a/tests/sharedtria/tria_multigrid_01.cc
+++ b/tests/sharedtria/tria_multigrid_01.cc
@@ -15,7 +15,9 @@
 // ---------------------------------------------------------------------


-// create a shared tria mesh and distribute multigrid
+// create a shared tria mesh, partition the active mesh
+// in z-order as well as the multigrid levels, and
+// compare the partition against p4est

 #include "../tests.h"
 #include
diff --git a/tests/sharedtria/tria_multigrid_01.mpirun=3.output b/tests/sharedtria/tria_multigrid_01.mpirun=3.with_p4est=true.output
similarity index 100%
rename from tests/sharedtria/tria_multigrid_01.mpirun=3.output
rename to tests/sharedtria/tria_multigrid_01.mpirun=3.with_p4est=true.output
diff --git a/tests/sharedtria/tria_zorder_01.cc b/tests/sharedtria/tria_zorder_01.cc
index 95c21f4352..4b240e2327 100644
--- a/tests/sharedtria/tria_zorder_01.cc
+++ b/tests/sharedtria/tria_zorder_01.cc
@@ -16,6 +16,7 @@

 // create a shared tria mesh and distribute with zorder scheme
+// compare against p4est

 #include "../tests.h"
 #include
diff --git a/tests/sharedtria/tria_zorder_01.mpirun=3.output b/tests/sharedtria/tria_zorder_01.mpirun=3.with_p4est=true.output
similarity index 100%
rename from tests/sharedtria/tria_zorder_01.mpirun=3.output
rename to tests/sharedtria/tria_zorder_01.mpirun=3.with_p4est=true.output
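
The documentation note added above leaves open what a custom partitioner that also handles
the multigrid levels could look like. Below is a minimal sketch, not taken from this patch:
the callback name mypartition, the round-robin active-cell assignment, and the child-based
level assignment are all hypothetical choices for illustration; it assumes the
parallel::shared::Triangulation constructor signature of this version of deal.II.

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/base/mpi.h>

#include <functional>

using namespace dealii;

// Hypothetical custom partitioner: any scheme works, as long as every
// active cell receives a valid subdomain id and -- because
// construct_multigrid_hierarchy is set -- every level cell receives a
// level subdomain id as well.
template <int dim>
void mypartition (parallel::shared::Triangulation<dim> &tria)
{
  const unsigned int n_procs =
    Utilities::MPI::n_mpi_processes (tria.get_communicator());

  // assign active cells round-robin (illustrative choice)
  unsigned int idx = 0;
  for (typename Triangulation<dim>::active_cell_iterator
       cell = tria.begin_active(); cell != tria.end(); ++cell, ++idx)
    cell->set_subdomain_id (idx % n_procs);

  // partition the level cells by hand, finest level first: active cells
  // keep their subdomain id, parents inherit the level subdomain id of
  // their first child (illustrative choice)
  for (int lvl = tria.n_levels()-1; lvl >= 0; --lvl)
    for (typename Triangulation<dim>::cell_iterator
         cell = tria.begin(lvl); cell != tria.end(lvl); ++cell)
      if (!cell->has_children())
        cell->set_level_subdomain_id (cell->subdomain_id());
      else
        cell->set_level_subdomain_id (cell->child(0)->level_subdomain_id());
}

// usage: combine the custom signal with the multigrid hierarchy flag and
// connect the callback as in the documentation example above
void make_tria ()
{
  const int dim = 2;
  parallel::shared::Triangulation<dim>
  tria (MPI_COMM_WORLD,
        Triangulation<dim>::none,
        /*allow_artificial_cells=*/ true,
        parallel::shared::Triangulation<dim>::Settings
        (parallel::shared::Triangulation<dim>::partition_custom_signal |
         parallel::shared::Triangulation<dim>::construct_multigrid_hierarchy));
  tria.signals.post_refinement.connect (std::bind(&mypartition<dim>, std::ref(tria)));
}

The level loop runs from the finest level down so that a parent cell can inherit a level
subdomain id its children have already received; the partition_multigrid_levels function in
this patch follows the same bottom-up pattern.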