From: Wolfgang Bangerth
Date: Mon, 13 Jul 2015 21:24:03 +0000 (-0500)
Subject: Allow attaching weights to each cell when partitioning meshes.
X-Git-Tag: v8.4.0-rc2~751^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9f6bcf85bc0dffe2ae1b8f86e789ba1afbe2158e;p=dealii.git

Allow attaching weights to each cell when partitioning meshes.

This makes it possible to partition meshes in such a way that it is not
the number of cells on each partition that is roughly equal, but the sum
of the cells' weights.
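To illustrate the intended workflow (a sketch for illustration only, not part
of the patch; 'dim' and the weight values are placeholders): one creates the
triangulation with the no_automatic_repartitioning flag, refines or coarsens
it, and only then weighs the cells of the resulting mesh and rebalances:

    // sketch: disable automatic repartitioning at construction time
    parallel::distributed::Triangulation<dim>
      triangulation (MPI_COMM_WORLD,
                     dealii::Triangulation<dim>::none,
                     parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);

    // ... create the coarse mesh and set refinement/coarsening flags ...

    // refines and coarsens, but does not move cells between processors
    triangulation.execute_coarsening_and_refinement ();

    // one weight per active cell of the *new* mesh, indexed by
    // cell->active_cell_index(); only locally owned entries are read.
    // weighing boundary cells ten times as much is an arbitrary example
    std::vector<unsigned int> weights (triangulation.n_active_cells(), 1);
    for (const auto &cell : triangulation.active_cell_iterators())
      if (cell->is_locally_owned() && cell->at_boundary())
        weights[cell->active_cell_index()] = 10;

    // partition so that the per-processor sums of weights are roughly equal
    triangulation.repartition (weights);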
---

diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index e829d4fc58..2edf49acff 100644
--- a/include/deal.II/distributed/tria.h
+++ b/include/deal.II/distributed/tria.h
@@ -475,9 +475,27 @@ namespace parallel
        * this-@>locally_owned_subdomain()), refinement and coarsening
        * flags are only respected for those locally owned cells. Flags may be
        * set on other cells as well (and may often, in fact, if you call
-       * dealii::Triangulation::prepare_coarsening_and_refinement) but will be
+       * dealii::Triangulation::prepare_coarsening_and_refinement()) but will be
        * largely ignored: the decision to refine the global mesh will only be
        * affected by flags set on locally owned cells.
+       *
+       * @note This function by default partitions the mesh in such a way
+       * that the number of cells on all processors is roughly equal,
+       * i.e., it is not possible to "weigh" cells that are more expensive
+       * to compute on than others. This is because these weights would
+       * apply to the current set of cells, which will then be refined and
+       * coarsened into a separate set of cells, which will only then be
+       * partitioned between processors. In other words, the cells for
+       * which you could attach weights before calling this function will
+       * no longer be the set of cells that is ultimately partitioned.
+       * If you want to set weights for partitioning, you need to create
+       * your triangulation object by passing the
+       * parallel::distributed::Triangulation::no_automatic_repartitioning
+       * flag to the constructor, which ensures that calling the current
+       * function only refines and coarsens the triangulation, but doesn't
+       * partition it. You would then call the current function and, in a
+       * second step, compute and use cell weights when partitioning the
+       * mesh by calling repartition() with a non-default argument.
        */
       virtual void execute_coarsening_and_refinement ();
 
@@ -497,9 +515,53 @@ namespace parallel
        * information will be packed and shipped to different processors. In
        * other words, you probably want to treat a call to repartition() in
        * the same way as execute_coarsening_and_refinement() with respect to
-       * dealing with data movement (SolutionTransfer, etc..).
-       */
-      void repartition ();
+       * dealing with data movement (SolutionTransfer, etc.).
+       *
+       * @param weights_per_cell A vector of weights that indicates how
+       * "expensive" computations are on each of the active cells. If
+       * left at its default value, this function will partition the
+       * mesh in such a way that it has roughly equal numbers of cells
+       * on all processors. If the argument is specified, then the mesh
+       * is partitioned so that the sum of weights of the cells owned
+       * by each processor is roughly equal. The size of the vector
+       * needs to equal the number of active cells of the current
+       * triangulation (i.e., must be equal to
+       * triangulation.n_active_cells()), and its entries need to be in
+       * the order in which one encounters these cells in a loop over
+       * all cells (in the same way as vectors of refinement indicators
+       * are interpreted in namespace GridRefinement). In other words,
+       * the element of this vector that corresponds to a given cell has
+       * index cell-@>active_cell_index(). Of the elements of this
+       * vector, only those that correspond to locally owned
+       * active cells are actually considered. You can set the rest
+       * to arbitrary values.
+       *
+       * @note The only requirement on the weights is that every cell's
+       * weight is positive and that the sum over all weights on all
+       * processors can be formed using a 64-bit integer. Beyond that,
+       * it is your choice how you want to interpret the weights. A
+       * common approach is to consider the weights proportional to
+       * the cost of doing computations on a cell, e.g., by summing
+       * the time for assembly and solving. In practice, determining
+       * this cost is of course not trivial since we don't solve on
+       * isolated cells, but on the entire mesh. In such cases, one
+       * could, for example, choose the weight equal to the number
+       * of unknowns per cell (in the context of hp finite element
+       * methods), or use a heuristic that estimates the cost on
+       * each cell depending on whether, for example, one has to
+       * run some expensive algorithm on some cells but not others
+       * (such as forming boundary integrals during the assembly
+       * only on cells that are actually at the boundary, or computing
+       * expensive nonlinear terms only on some cells but not others,
+       * e.g., in the elasto-plastic problem in step-42). In any
+       * case, the scaling does not matter: you can choose the weights
+       * equal to the number of unknowns on each cell, or equal to ten
+       * times the number of unknowns -- because only their relative
+       * size matters in partitioning, both choices will lead to the
+       * same partitioning.
+       */
+      void repartition (const std::vector<unsigned int> &weights_per_cell
+                        = std::vector<unsigned int>());
 
       /**
        * When vertices have been moved locally, for example using code like
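As a concrete instance of the weight heuristics suggested in the documentation
above, a minimal sketch for the hp case (assuming a DoFHandler-like object
dof_handler with finite elements already attached; all names here are
placeholders) would weigh each cell by its number of unknowns:

    // sketch: cost ~ number of unknowns per cell; the absolute scale is
    // irrelevant since only relative weights influence the partition
    std::vector<unsigned int> weights (triangulation.n_active_cells(), 1);
    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        weights[cell->active_cell_index()] = cell->get_fe().dofs_per_cell;

    triangulation.repartition (weights);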
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index e725572e80..adf3ead236 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -1942,6 +1942,162 @@ namespace
       // p4est cell is not in list
       return false;
     }
+
+
+
+  /**
+   * A data structure that we use to store the weights of all cells to
+   * be used upon partitioning. The class stores them in the order in
+   * which p4est will encounter cells, not in the order in which
+   * deal.II walks over them.
+   */
+  template <int dim, int spacedim>
+  class PartitionWeights
+  {
+  public:
+    PartitionWeights (const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
+                      const std::vector<unsigned int> &cell_weights,
+                      const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
+                      const types::subdomain_id my_subdomain);
+
+    /**
+     * A callback function that we pass to the p4est data structures when a
+     * forest is to be partitioned. The p4est functions call it back with a
+     * tree (the index of the tree that grows out of a given coarse cell)
+     * and a refinement path from that coarse cell to a terminal/leaf cell.
+     * The function returns the weight of the cell.
+     */
+    static
+    int
+    cell_weight (typename internal::p4est::types<dim>::forest *forest,
+                 typename internal::p4est::types<dim>::topidx coarse_cell_index,
+                 typename internal::p4est::types<dim>::quadrant *quadrant);
+
+  private:
+    std::vector<unsigned int> cell_weights_list;
+    std::vector<unsigned int>::const_iterator current_pointer;
+
+    /**
+     * Recursively go through the cells of the p4est and deal.II
+     * triangulation and collect the partitioning weights.
+     */
+    void
+    build_weight_list (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+                       const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+                       const types::subdomain_id my_subdomain,
+                       const std::vector<unsigned int> &cell_weights);
+  };
+
+
+  template <int dim, int spacedim>
+  PartitionWeights<dim,spacedim>::
+  PartitionWeights (const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
+                    const std::vector<unsigned int> &cell_weights,
+                    const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
+                    const types::subdomain_id my_subdomain)
+  {
+    Assert (cell_weights.size() == triangulation.n_active_cells(), ExcInternalError());
+
+    // build the cell_weights_list as an array over all locally owned
+    // active cells (i.e., a subset of the weights provided by the user,
+    // which are for all active cells), in the order in which p4est will
+    // encounter them
+    cell_weights_list.reserve (triangulation.n_locally_owned_active_cells());
+    for (unsigned int c=0; c<triangulation.n_cells(0); ++c)
+      {
+        const unsigned int coarse_cell_index =
+          p4est_tree_to_coarse_cell_permutation[c];
+
+        const typename Triangulation<dim,spacedim>::cell_iterator
+        cell (&triangulation, 0, coarse_cell_index);
+
+        typename internal::p4est::types<dim>::quadrant p4est_cell;
+        internal::p4est::functions<dim>::
+        quadrant_set_morton (&p4est_cell,
+                             /*level=*/0,
+                             /*index=*/0);
+        p4est_cell.p.which_tree = c;
+        build_weight_list (cell, p4est_cell, my_subdomain,
+                           cell_weights);
+      }
+
+    // ensure that we built the list right
+    Assert(cell_weights_list.size() == triangulation.n_locally_owned_active_cells(),
+           ExcInternalError());
+
+    // set the current pointer to the first element of the list, given that
+    // we will walk through it sequentially
+    current_pointer = cell_weights_list.begin();
+  }
+
+
+  template <int dim, int spacedim>
+  void
+  PartitionWeights<dim,spacedim>::
+  build_weight_list (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+                     const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+                     const types::subdomain_id my_subdomain,
+                     const std::vector<unsigned int> &cell_weights)
+  {
+    if (!cell->has_children())
+      {
+        if (cell->subdomain_id() == my_subdomain)
+          cell_weights_list.push_back (cell_weights[cell->active_cell_index()]);
+      }
+    else
+      {
+        typename internal::p4est::types<dim>::quadrant
+        p4est_child[GeometryInfo<dim>::max_children_per_cell];
+        for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+          switch (dim)
+            {
+            case 2:
+              P4EST_QUADRANT_INIT(&p4est_child[c]);
+              break;
+            case 3:
+              P8EST_QUADRANT_INIT(&p4est_child[c]);
+              break;
+            default:
+              Assert (false, ExcNotImplemented());
+            }
+        internal::p4est::functions<dim>::
+        quadrant_childrenv (&p4est_cell,
+                            p4est_child);
+        for (unsigned int c=0;
+             c<GeometryInfo<dim>::max_children_per_cell; ++c)
+          {
+            p4est_child[c].p.which_tree = p4est_cell.p.which_tree;
+            build_weight_list (cell->child(c),
+                               p4est_child[c],
+                               my_subdomain,
+                               cell_weights);
+          }
+      }
+  }
+
+
+  template <int dim, int spacedim>
+  int
+  PartitionWeights<dim,spacedim>::
+  cell_weight (typename internal::p4est::types<dim>::forest *forest,
+               typename internal::p4est::types<dim>::topidx,
+               typename internal::p4est::types<dim>::quadrant *)
+  {
+    // the function gets two additional arguments, but we don't need them
+    // since we know in which order p4est will walk through the cells
+    // and have already built our weight lists in this order
+
+    PartitionWeights<dim,spacedim> *this_object
+      = reinterpret_cast<PartitionWeights<dim,spacedim>*>(forest->user_pointer);
+
+    Assert (this_object->current_pointer >= this_object->cell_weights_list.begin(),
+            ExcInternalError());
+    Assert (this_object->current_pointer < this_object->cell_weights_list.end(),
+            ExcInternalError());
+
+    // get the weight, increment the pointer, and return the weight
+    return *this_object->current_pointer++;
+  }
 }
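The class above adapts deal.II to p4est's weight-callback interface, which
queries one integer weight per local leaf, in p4est's own traversal order.
Stripped of the deal.II wrappers, that contract looks roughly like the
following standalone sketch (WeightState and weight_callback are illustrative
names; p4est_t, p4est_weight_t, and p4est_partition are p4est's C API, used
here under the assumption of a 2d forest):

    #include <p4est.h>
    #include <vector>

    // state handed to the callback through p4est's user_pointer
    struct WeightState
    {
      std::vector<int>                 weights; // one entry per local leaf
      std::vector<int>::const_iterator next;    // next weight to hand out
    };

    // called by p4est once per local leaf, in traversal order, so a
    // sequential read through the pre-sorted list returns the right weight
    static int
    weight_callback (p4est_t *forest, p4est_topidx_t, p4est_quadrant_t *)
    {
      WeightState *state = static_cast<WeightState *> (forest->user_pointer);
      return *state->next++;
    }

    // elsewhere, attach the state and partition by weight:
    //   state.next = state.weights.begin();
    //   forest->user_pointer = &state;
    //   p4est_partition (forest, /*allow_for_coarsening=*/1, &weight_callback);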
@@ -2615,19 +2771,15 @@ namespace parallel
                                                     &connectivity);
 #endif
       if (numcpus != Utilities::MPI::n_mpi_processes (mpi_communicator))
-        {
-          // We are changing the number of CPUs so we need to repartition.
-          // Note that p4est actually distributes the cells between the changed
-          // number of CPUs and so everything works without this call, but
-          // this command changes the distribution for some reason, so we
-          // will leave it in here.
-          dealii::internal::p4est::functions<dim>::
-          partition (parallel_forest,
-                     /* prepare coarsening */ 1,
-                     /* weight_callback */ NULL);
-
-
-        }
+        // We are changing the number of CPUs so we need to repartition.
+        // Note that p4est actually distributes the cells between the changed
+        // number of CPUs and so everything works without this call, but
+        // this command changes the distribution for some reason, so we
+        // will leave it in here.
+        dealii::internal::p4est::functions<dim>::
+        partition (parallel_forest,
+                   /* prepare coarsening */ 1,
+                   /* weight_callback */ NULL);
 
       try
         {
@@ -3382,8 +3534,8 @@ namespace parallel
 
       // copy refine and coarsen flags into p4est and execute the refinement
      // and coarsening. this uses the refine_and_coarsen_list just built,
-      // which is communicated to the callback functions through the
-      // user_pointer
+      // which is communicated to the callback functions through
+      // p4est's user_pointer object
       Assert (parallel_forest->user_pointer == this,
               ExcInternalError());
       parallel_forest->user_pointer = &refine_and_coarsen_list;
@@ -3427,7 +3579,11 @@ namespace parallel
       // (such as SolutionTransfer data) to the p4est
       attach_mesh_data();
 
-      // partition the new mesh between all processors
+      // partition the new mesh between all processors. we cannot
+      // use weights for each cell here because the cells have changed
+      // since the user last called execute_coarsening_and_refinement()
+      // (due to the mesh refinement and coarsening), and the user has
+      // not had time to attach cell weights yet
       if (!(settings & no_automatic_repartitioning))
         dealii::internal::p4est::functions<dim>::
         partition (parallel_forest,
@@ -3464,10 +3620,12 @@ namespace parallel
 
     template <int dim, int spacedim>
     void
-    Triangulation<dim,spacedim>::repartition ()
+    Triangulation<dim,spacedim>::repartition (const std::vector<unsigned int> &cell_weights)
     {
       AssertThrow(settings & no_automatic_repartitioning,
-                  ExcMessage("You need to set no_automatic_repartitioning when calling repartition() manually."));
+                  ExcMessage("You need to set the 'no_automatic_repartitioning' flag in the "
+                             "constructor when creating this parallel::distributed::Triangulation "
+                             "object if you want to call repartition() manually."));
 
 #ifdef DEBUG
       for (typename Triangulation<dim,spacedim>::active_cell_iterator
@@ -3485,10 +3643,38 @@ namespace parallel
       // (such as SolutionTransfer data) to the p4est
       attach_mesh_data();
 
-      dealii::internal::p4est::functions<dim>::
-      partition (parallel_forest,
-                 /* prepare coarsening */ 1,
-                 /* weight_callback */ NULL);
+      if (cell_weights.size() == 0)
+        {
+          // no cell weights given -- call p4est's 'partition' without a
+          // callback for cell weights
+          dealii::internal::p4est::functions<dim>::
+          partition (parallel_forest,
+                     /* prepare coarsening */ 1,
+                     /* weight_callback */ NULL);
+        }
+      else
+        {
+          AssertDimension (cell_weights.size(), this->n_active_cells());
+
+          // copy the cell weights into the order in which p4est will
+          // encounter them, then attach (temporarily) a pointer to
+          // this list through p4est's user_pointer object
+          Assert (parallel_forest->user_pointer == this,
+                  ExcInternalError());
+          PartitionWeights<dim,spacedim> partition_weights (*this,
+                                                            cell_weights,
+                                                            p4est_tree_to_coarse_cell_permutation,
+                                                            my_subdomain);
+          parallel_forest->user_pointer = &partition_weights;
+
+          dealii::internal::p4est::functions<dim>::
+          partition (parallel_forest,
+                     /* prepare coarsening */ 1,
+                     /* weight_callback */
+                     &PartitionWeights<dim,spacedim>::cell_weight);
+
+          // reset the user pointer to its previous state
+          parallel_forest->user_pointer = this;
+        }
 
       try
         {
@@ -3503,6 +3689,7 @@ namespace parallel
 
       refinement_in_progress = false;
 
+      // update how many cells, edges, etc, we store locally
       update_number_cache ();
     }
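Finally, since the documentation above says that a call to repartition()
should be treated like execute_coarsening_and_refinement() with respect to
data movement, here is a sketch of shipping a solution vector along with the
cells (illustration only; dof_handler, fe, solution, cell_weights, and
mpi_communicator are assumed to exist, and the Trilinos vector type is just
one possible choice):

    #include <deal.II/distributed/solution_transfer.h>

    parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
      solution_transfer (dof_handler);

    // pack the solution before the cells move ...
    solution_transfer.prepare_for_coarsening_and_refinement (solution);
    triangulation.repartition (cell_weights);

    // ... and unpack it on the new owning processors afterwards
    dof_handler.distribute_dofs (fe);
    TrilinosWrappers::MPI::Vector new_solution (dof_handler.locally_owned_dofs(),
                                                mpi_communicator);
    solution_transfer.interpolate (new_solution);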