From: Rene Gassmoeller
Date: Wed, 30 Sep 2015 14:12:16 +0000 (+0200)
Subject: Introduce a signal for weighted repartitioning
X-Git-Tag: v8.4.0-rc2~258^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2c27e757e39d2ff6fb002b16ae67564314c9f420;p=dealii.git

Introduce a signal for weighted repartitioning
---

diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index 229c5e864d..5a09bb51dc 100644
--- a/include/deal.II/distributed/tria.h
+++ b/include/deal.II/distributed/tria.h
@@ -368,6 +368,8 @@ namespace parallel
        */
       typedef typename dealii::Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
 
+      typedef typename dealii::Triangulation<dim,spacedim>::CellStatus CellStatus;
+
       /**
        * Configuration flags for distributed Triangulations to be set in the
        * constructor. Settings can be combined using bitwise OR.
@@ -482,22 +484,21 @@ namespace parallel
        * affected by flags set on locally owned cells.
        *
        * @note This function by default partitions the mesh in such a way
-       * that the number of cells on all processors is roughly equal,
-       * i.e., it is not possible to "weigh" cells that are more expensive
-       * to compute on than others. This is because these weights would apply
-       * to the current set of cells, which will then be refined and
-       * coarsened into a separate set of cells, which will only then be
-       * partitioned between processors. In other words, the weights for
-       * cells you could attach before calling this function will no
-       * longer be the set of cells that will ultimately be partitioned.
-       * If you want to set weights for partitioning, you need to create
-       * your triangulation object by passing the
+       * that the number of cells on all processors is roughly equal.
+       * If you want to set weights for partitioning, e.g., because some cells
+       * are more expensive to compute than others, you can use the signal
+       * cell_weight as documented in the dealii::Triangulation class. This
+       * function will check whether a function is connected to the signal
+       * and, if so, use it. If you prefer to repartition the mesh yourself at
+       * user-defined intervals only, you can create your triangulation
+       * object by passing the
        * parallel::distributed::Triangulation::no_automatic_repartitioning
        * flag to the constructor, which ensures that calling the current
        * function only refines and coarsens the triangulation, but doesn't
-       * partition it. You would then call the current function, in a second
-       * step compute and use cell weights when partitioning the mesh upon
-       * calling repartition() with a non-default argument.
+       * partition it. You can then call the repartition() function manually.
+       * The usage of the cell_weight signal is identical in both cases:
+       * if a function is connected to the signal, it will be used to balance
+       * the calculated weights; otherwise the number of cells is balanced.
        */
       virtual void execute_coarsening_and_refinement ();
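For illustration, a minimal usage sketch of the mechanism described in the note above (not part of the patch; the weight function, the mesh setup, and the chosen factor of ten are invented for the example):

  #include <deal.II/distributed/tria.h>
  #include <deal.II/grid/grid_generator.h>

  using namespace dealii;

  template <int dim>
  unsigned int
  my_weight (const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
             const typename parallel::distributed::Triangulation<dim>::CellStatus /*status*/)
  {
    // pretend that cells in the right half of the domain are ten times
    // as expensive as those in the left half (base weight is 1000)
    return (cell->center()[0] > 0.5) ? 10 * 1000 : 0;
  }

  void run ()
  {
    parallel::distributed::Triangulation<2> tria (MPI_COMM_WORLD);
    GridGenerator::hyper_cube (tria);
    tria.signals.cell_weight.connect (&my_weight<2>);
    tria.refine_global (3);   // repartitioning now takes the weights into account
  }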
@@ -519,30 +520,15 @@ namespace parallel
        * the same way as execute_coarsening_and_refinement() with respect to
        * dealing with data movement (SolutionTransfer, etc.).
        *
-       * @param weights_per_cell A vector of weights that indicates how
-       * "expensive" computations are on each of the active cells. If
-       * left at its default value, this function will partition the
-       * mesh in such a way that it has roughly equal numbers of cells
-       * on all processors. If the argument is specified, then the mesh
-       * is partitioned so that the sum of weights of the cells owned
-       * by each processor is roughly equal. The size of the vector
-       * needs to equal the number of active cells of the current
-       * triangulation (i.e., must be equal to
-       * triangulation.n_active_cells()) and be in the order
-       * in which one encounters these cells in a loop over all cells (in
-       * the same way as vectors of refinement indicators are interpreted
-       * in namespace GridRefinement). In other words, the element of this
-       * vector that corresponds to a given cell has index
-       * cell-@>active_cell_index(). Of the elements of this
-       * vector, only those that correspond to locally owned
-       * active cells are actually considered. You can set the rest
-       * to arbitrary values.
-       *
-       * @note The only requirement on the weights is that every cell's
-       * weight is positive and that the sum over all weights on all
-       * processors can be formed using a 64-bit integer. Beyond that,
-       * it is your choice how you want to interpret the weights. A
-       * common approach is to consider the weights proportional to
+       * @note If no function is connected to the cell_weight signal described
+       * in the dealii::Triangulation class, this function will balance the
+       * number of cells on each processor. If one or more functions are
+       * connected, it will calculate the sum of the weights and balance the
+       * weights across processors. The only requirement on the weights is
+       * that every cell's weight is positive and that the sum over all
+       * weights on all processors can be formed using a 64-bit integer.
+       * Beyond that, it is your choice how you want to interpret the weights.
+       * A common approach is to consider the weights proportional to
        * the cost of doing computations on a cell, e.g., by summing
        * the time for assembly and solving. In practice, determining
        * this cost is of course not trivial since we don't solve on
@@ -555,15 +541,9 @@ namespace parallel
        * (such as forming boundary integrals during the assembly
        * only on cells that are actually at the boundary, or computing
        * expensive nonlinear terms only on some cells but not others,
-       * e.g., in the elasto-plastic problem in step-42). In any
-       * case, the scaling does not matter: you can choose the weights
-       * equal to the number of unknowns on each cell, or equal to ten
-       * times the number of unknowns -- because only their relative
-       * size matters in partitioning, both choices will lead to the
-       * same mesh.
+       * e.g., in the elasto-plastic problem in step-42).
        */
-      void repartition (const std::vector<unsigned int> &weights_per_cell
-                        = std::vector<unsigned int>());
+      void repartition ();
 
       /**
        * When vertices have been moved locally, for example using code like
@@ -696,34 +676,6 @@ namespace parallel
       void load(const char *filename,
                 const bool autopartition = true);
 
-      /**
-       * Used to inform in the callbacks of register_data_attach() and
-       * notify_ready_to_unpack() how the cell with the given cell_iterator is
-       * going to change. Note that this may me different than the
-       * refine_flag() and coarsen_flag() in the cell_iterator because of
-       * refinement constraints that this machine does not see.
-       */
-      enum CellStatus
-      {
-        /**
-         * The cell will not be refined or coarsened and might or might not
-         * move to a different processor.
-         */
-        CELL_PERSIST,
-        /**
-         * The cell will be or was refined.
-         */
-        CELL_REFINE,
-        /**
-         * The children of this cell will be or were coarsened into this cell.
-         */
-        CELL_COARSEN,
-        /**
-         * Invalid status. Will not occur for the user.
-         */
-        CELL_INVALID
-      };
-
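Following the note above on cost-proportional weights, a hedged sketch of a slot in the spirit of the step-42 example; the convention that material_id 1 marks cells with expensive nonlinear terms is invented here, and only the relative sizes of the returned weights matter:

  template <int dim>
  unsigned int
  elastoplastic_weight (const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
                        const typename parallel::distributed::Triangulation<dim>::CellStatus /*status*/)
  {
    // hypothetical marker: cells with material_id 1 carry the expensive
    // nonlinear terms; with the base weight of 1000 they end up three
    // times as expensive as ordinary cells (3000 vs. 1000)
    return (cell->material_id() == 1) ? 2 * 1000 : 0;
  }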
       /**
        * Register a function with the current Triangulation object that will
        * be used to attach data to active cells before
@@ -823,7 +775,6 @@ namespace parallel
         (const std::vector > &);
 
-
     private:
 
       /**
@@ -965,6 +916,21 @@ namespace parallel
        */
       void attach_mesh_data();
 
+      /**
+       * Internal function notifying all registered slots to provide their
+       * weights before repartitioning occurs. Called from
+       * execute_coarsening_and_refinement() and repartition().
+       *
+       * @return A vector of unsigned integers representing the weight or
+       * computational load of every cell after the refinement/coarsening/
+       * repartition cycle. Note that the number of entries does not need to
+       * be equal to either n_active_cells or n_locally_owned_active_cells,
+       * because the triangulation is not updated yet. The weights are sorted
+       * in the order that p4est will encounter them while iterating over
+       * them.
+       */
+      std::vector<unsigned int>
+      get_cell_weights();
+
       /**
        * Fills a map that, for each vertex, lists all the processors whose
        * subdomains are adjacent to that vertex. Used by

diff --git a/include/deal.II/grid/tria.h b/include/deal.II/grid/tria.h
index 7025568078..997636d7ff 100644
--- a/include/deal.II/grid/tria.h
+++ b/include/deal.II/grid/tria.h
@@ -1899,6 +1899,62 @@ public:
    * @{
    */
 
+
+  /**
+   * Used to inform functions in derived classes how the cell with the given
+   * cell_iterator is going to change. Note that this may be different from
+   * the refine_flag() and coarsen_flag() in the cell_iterator in parallel
+   * calculations because of refinement constraints that this machine does
+   * not see.
+   */
+  enum CellStatus
+  {
+    /**
+     * The cell will not be refined or coarsened and might or might not
+     * move to a different processor.
+     */
+    CELL_PERSIST,
+    /**
+     * The cell will be or was refined.
+     */
+    CELL_REFINE,
+    /**
+     * The children of this cell will be or were coarsened into this cell.
+     */
+    CELL_COARSEN,
+    /**
+     * Invalid status. Will not occur for the user.
+     */
+    CELL_INVALID
+  };
+
+  /**
+   * A structure used to accumulate the results of the cell_weight slot
+   * functions below. It takes an iterator range and returns the sum of
+   * values.
+   */
+  template <typename T>
+  struct sum
+  {
+    typedef T result_type;
+
+    template <typename InputIterator>
+    T operator() (InputIterator first, InputIterator last) const
+    {
+      // If there are no slots to call, just return the
+      // default-constructed value
+      if (first == last)
+        return T();
+
+      T sum = *first++;
+      while (first != last)
+        {
+          sum += *first++;
+        }
+
+      return sum;
+    }
+  };
+
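To make the combiner's behavior concrete, here is a self-contained sketch (assuming C++11 and Boost; the struct is a local copy of the one added above, renamed Sum to keep it independent): boost.signals2 hands the combiner the range of all slot results, and this one adds them up.

  #include <boost/signals2.hpp>
  #include <iostream>

  template <typename T>
  struct Sum
  {
    typedef T result_type;

    template <typename InputIterator>
    T operator() (InputIterator first, InputIterator last) const
    {
      if (first == last)
        return T();            // no slots connected
      T sum = *first++;
      while (first != last)
        sum += *first++;
      return sum;
    }
  };

  int main ()
  {
    boost::signals2::signal<unsigned int (), Sum<unsigned int> > signal;
    signal.connect ([]() -> unsigned int { return 100; });
    signal.connect ([]() -> unsigned int { return 250; });
    std::cout << signal() << std::endl;   // prints 350 = 100 + 250
  }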
   /**
    * A structure that has boost::signal objects for a number of actions that a
    * triangulation can do to itself. Please refer to the
@@ -1978,6 +2034,33 @@ public:
      * @p post_refinement_on_cell are not connected to this signal.
      */
     boost::signals2::signal<void ()> any_change;
+
+    /**
+     * This signal is triggered for each cell during every automatic or
+     * manual repartitioning. This signal is somewhat special in that it is
+     * only triggered for distributed parallel calculations and only if
+     * functions are connected to it. It is intended to allow a weighted
+     * repartitioning of the domain to balance the computational load across
+     * processes in a different way than balancing the number of cells. Any
+     * connected function is expected to take an iterator to a cell, and a
+     * CellStatus argument that indicates whether this cell is going to be
+     * refined, coarsened or left untouched (see the documentation of the
+     * CellStatus enum for more information). The function is expected to
+     * return an unsigned integer, which is interpreted as the additional
+     * computational load of this cell. If this cell is going to be
+     * coarsened, the signal is called for the parent cell and you need to
+     * provide the weight of the future parent cell. If this cell is going
+     * to be refined, the function should return a weight that will be
+     * assigned equally to every future child cell of the current cell. As
+     * a reference, a value of 1000 is added to the total weight for every
+     * cell, so a signal return value of 1000 (resulting in a total weight
+     * of 2000) means that it is twice as expensive for a process to handle
+     * this particular cell. If several functions are connected to this
+     * signal, their return values will be summed to calculate the final
+     * weight.
+     */
+    boost::signals2::signal<unsigned int (const cell_iterator &,
+                                          const CellStatus),
+                            sum<unsigned int> > cell_weight;
   };
 
   /**
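A sketch of a slot that distinguishes the CellStatus values documented above; the particular weights are arbitrary and only serve to show the three cases a user-provided function has to consider:

  template <int dim>
  unsigned int
  status_aware_weight (const typename Triangulation<dim>::cell_iterator &cell,
                       const typename Triangulation<dim>::CellStatus     status)
  {
    switch (status)
      {
      case Triangulation<dim>::CELL_PERSIST:
        return 1000;      // extra cost of keeping this cell as it is
      case Triangulation<dim>::CELL_REFINE:
        return 2000;      // will be assigned to every future child cell
      case Triangulation<dim>::CELL_COARSEN:
        return 500;       // weight of the future parent cell
      default:            // CELL_INVALID never reaches user code
        return 0;
      }
  }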
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 5c92934683..c652254f57 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -1407,7 +1407,7 @@ namespace
                         quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree *>(&tree),
                                                 &p4est_cell)
                         == false))
-      return; //this quadrant and none of it's childs belongs to us.
+      return; //this quadrant and none of its children belong to us.
 
     bool p4est_has_children = (idx == -1);
 
@@ -1505,7 +1505,7 @@ namespace
                      ptr);
         }
 
-      //mark other childs as invalid, so that unpack only happens once
+      //mark other children as invalid, so that unpack only happens once
       for (unsigned int i=1; i<GeometryInfo<dim>::max_children_per_cell; ++i)
         {
           int child_idx = sc_array_bsearch(const_cast<sc_array_t *>(&tree.quadrants),
@@ -1521,7 +1521,7 @@ namespace
     }
   else
     {
-      //it's children got coarsened into this cell
+      //its children got coarsened into this cell
       typename internal::p4est::types<dim>::quadrant *q;
       q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
            sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), idx)
@@ -1540,6 +1540,86 @@ namespace
         }
     }
 
+  template <int dim, int spacedim>
+  void
+  get_cell_weights_recursively (const typename internal::p4est::types<dim>::tree &tree,
+                                const typename Triangulation<dim,spacedim>::cell_iterator &dealii_cell,
+                                const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+                                const typename Triangulation<dim,spacedim>::Signals &signals,
+                                std::vector<unsigned int> &weight)
+  {
+    const int idx = sc_array_bsearch(const_cast<sc_array_t *>(&tree.quadrants),
+                                     &p4est_cell,
+                                     internal::p4est::functions<dim>::quadrant_compare);
+
+    if (idx == -1 && (internal::p4est::functions<dim>::
+                      quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree *>(&tree),
+                                              &p4est_cell)
+                      == false))
+      return; // This quadrant and none of its children belong to us.
+
+    const bool p4est_has_children = (idx == -1);
+
+    if (p4est_has_children && dealii_cell->has_children())
+      {
+        //recurse further
+        typename internal::p4est::types<dim>::quadrant
+        p4est_child[GeometryInfo<dim>::max_children_per_cell];
+        for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+          switch (dim)
+            {
+            case 2:
+              P4EST_QUADRANT_INIT(&p4est_child[c]);
+              break;
+            case 3:
+              P8EST_QUADRANT_INIT(&p4est_child[c]);
+              break;
+            default:
+              Assert (false, ExcNotImplemented());
+            }
+
+        internal::p4est::functions<dim>::
+        quadrant_childrenv (&p4est_cell, p4est_child);
+
+        for (unsigned int c=0;
+             c<GeometryInfo<dim>::max_children_per_cell; ++c)
+          {
+            get_cell_weights_recursively<dim,spacedim> (tree,
+                                                        dealii_cell->child(c),
+                                                        p4est_child[c],
+                                                        signals,
+                                                        weight);
+          }
+      }
+    else if (!p4est_has_children && !dealii_cell->has_children())
+      {
+        // This active cell didn't change
+        weight.push_back(1000);
+        weight.back() += signals.cell_weight(dealii_cell,
+                                             parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST);
+      }
+    else if (p4est_has_children)
+      {
+        // This cell will be refined
+        unsigned int parent_weight(1000);
+        parent_weight += signals.cell_weight(dealii_cell,
+                                             parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE);
+
+        for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+          {
+            // We assign the weight of the parent cell equally to all children
+            weight.push_back(parent_weight);
+          }
+      }
+    else
+      {
+        // This cell's children will be coarsened into this cell
+        weight.push_back(1000);
+        weight.back() += signals.cell_weight(dealii_cell,
+                                             parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN);
+      }
+  }
+
 
   template <int dim, int spacedim>
   void
@@ -1559,7 +1639,7 @@ namespace
                         quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree *>(&tree),
                                                 &p4est_cell)
                         == false))
-      // this quadrant and none of it's children belong to us.
+      // this quadrant and none of its children belong to us.
       return;
 
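A worked example of the bookkeeping in the refinement branch above, written as a tiny stand-alone helper (the function name is invented; it merely restates what the recursion does): in 2d, a cell flagged for refinement whose slot returns 600 produces four children that each carry 1000 + 600 = 1600, i.e. 6400 in total.

  #include <vector>

  std::vector<unsigned int>
  weights_for_future_children (const unsigned int slot_return_value,
                               const unsigned int n_children)  // 2^dim
  {
    // every future child carries the full parent weight
    return std::vector<unsigned int> (n_children, 1000 + slot_return_value);
  }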
@@ -1955,10 +2035,12 @@ namespace
   class PartitionWeights
   {
   public:
-    PartitionWeights (const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
-                      const std::vector<unsigned int> &cell_weights,
-                      const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
-                      const types::subdomain_id my_subdomain);
+    /**
+     * This constructor assumes the cell_weights are already sorted in the
+     * order that p4est will encounter the cells, and they do not contain
+     * ghost cells or artificial cells.
+     */
+    PartitionWeights (const std::vector<unsigned int> &cell_weights);
 
     /**
      * A callback function that we pass to the p4est data structures when a
      * forest is to be partitioned.
@@ -1976,105 +2058,21 @@ namespace
   private:
     std::vector<unsigned int> cell_weights_list;
     std::vector<unsigned int>::const_iterator current_pointer;
-
-    /**
-     * Recursively go through the cells of the p4est and deal.II triangulation
-     * and collect the partitioning weights.
-     */
-    void
-    build_weight_list (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
-                       const typename internal::p4est::types<dim>::quadrant &p4est_cell,
-                       const types::subdomain_id my_subdomain,
-                       const std::vector<unsigned int> &cell_weights);
   };
 
 
   template <int dim, int spacedim>
   PartitionWeights<dim,spacedim>::
-  PartitionWeights (const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
-                    const std::vector<unsigned int> &cell_weights,
-                    const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
-                    const types::subdomain_id my_subdomain)
+  PartitionWeights (const std::vector<unsigned int> &cell_weights)
+    :
+    cell_weights_list(cell_weights)
   {
-    Assert (cell_weights.size() == triangulation.n_active_cells(), ExcInternalError());
-
-    // build the cell_weights_list as an array over all locally owned
-    // active cells (i.e., a subset of the weights provided by the user, which
-    // are for all active cells), in the order in which p4est will encounter them
-    cell_weights_list.reserve (triangulation.n_locally_owned_active_cells());
-    for (unsigned int c=0; c<triangulation.n_cells(0); ++c)
-      {
-        const unsigned int coarse_cell_index =
-          p4est_tree_to_coarse_cell_permutation[c];
-
-        const typename Triangulation<dim,spacedim>::cell_iterator
-        cell (&triangulation, 0, coarse_cell_index);
-
-        typename internal::p4est::types<dim>::quadrant p4est_cell;
-        internal::p4est::functions<dim>::
-        quadrant_set_morton (&p4est_cell,
-                             /*level=*/0,
-                             /*index=*/0);
-        p4est_cell.p.which_tree = c;
-        build_weight_list (cell, p4est_cell, my_subdomain,
-                           cell_weights);
-      }
-
-    // ensure that we built the list right
-    Assert(cell_weights_list.size() == triangulation.n_locally_owned_active_cells(),
-           ExcInternalError());
-
     // set the current pointer to the first element of the list, given that
     // we will walk through it sequentially
     current_pointer = cell_weights_list.begin();
   }
 
-  template <int dim, int spacedim>
-  void
-  PartitionWeights<dim,spacedim>::
-  build_weight_list (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
-                     const typename internal::p4est::types<dim>::quadrant &p4est_cell,
-                     const types::subdomain_id my_subdomain,
-                     const std::vector<unsigned int> &cell_weights)
-  {
-    if (!cell->has_children())
-      {
-        if (cell->subdomain_id() == my_subdomain)
-          cell_weights_list.push_back (cell_weights[cell->active_cell_index()]);
-      }
-    else
-      {
-        typename internal::p4est::types<dim>::quadrant
-        p4est_child[GeometryInfo<dim>::max_children_per_cell];
-        for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
-          switch (dim)
-            {
-            case 2:
-              P4EST_QUADRANT_INIT(&p4est_child[c]);
-              break;
-            case 3:
-              P8EST_QUADRANT_INIT(&p4est_child[c]);
-              break;
-            default:
-              Assert (false, ExcNotImplemented());
-            }
-        internal::p4est::functions<dim>::
-        quadrant_childrenv (&p4est_cell,
-                            p4est_child);
-        for (unsigned int c=0;
-             c<GeometryInfo<dim>::max_children_per_cell; ++c)
-          {
-            p4est_child[c].p.which_tree = p4est_cell.p.which_tree;
-            build_weight_list (cell->child(c),
-                               p4est_child[c],
-                               my_subdomain,
-                               cell_weights);
-          }
-      }
-  }
-
 
   template <int dim, int spacedim>
   int
   PartitionWeights<dim,spacedim>::
@@ -2097,7 +2095,6 @@ namespace
     // get the weight, increment the pointer, and return the weight
     return *this_object->current_pointer++;
   }
-
 }
 
@@ -2761,10 +2758,7 @@ namespace parallel
           // number of CPUs and so everything works without this call, but
           // this command changes the distribution for some reason, so we
           // will leave it in here.
-          dealii::internal::p4est::functions<dim>::
-          partition (parallel_forest,
-                     /* prepare coarsening */ 1,
-                     /* weight_callback */ NULL);
+          repartition();
 
           try
             {
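The hand-off between PartitionWeights and the C-style p4est callback is easy to miss in the diff, so here is a self-contained sketch of the pattern (all types are illustrative stand-ins, not the real p4est structs): the stateful weight list is parked in the forest's user_pointer for the duration of the partition call, and the plain-function callback consumes one entry per invocation, in p4est's own iteration order.

  #include <vector>

  struct Forest            // stand-in for p4est's forest type
  {
    void *user_pointer;
  };

  struct WeightList
  {
    std::vector<int> weights;
    std::vector<int>::const_iterator current;
  };

  int weight_callback (Forest *forest)
  {
    WeightList *list = static_cast<WeightList *> (forest->user_pointer);
    return *list->current++;   // one weight per cell, consumed sequentially
  }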
@@ -3572,21 +3566,41 @@ namespace parallel
                                        balance_type(P8EST_CONNECT_FULL)),
                                       /*init_callback=*/NULL);
 
-
       // before repartitioning the mesh let others attach mesh related info
       // (such as SolutionTransfer data) to the p4est
       attach_mesh_data();
 
-      // partition the new mesh between all processors. we cannot
-      // use weights for each cell here because the cells have changed
-      // from the time the user has called execute_c_and_r due to the
-      // mesh refinement and coarsening, and the user has not had time
-      // to attach cell weights yet
       if (!(settings & no_automatic_repartitioning))
-        dealii::internal::p4est::functions<dim>::
-        partition (parallel_forest,
-                   /* prepare coarsening */ 1,
-                   /* weight_callback */ NULL);
+        {
+          // partition the new mesh between all processors. If cell weights
+          // have not been given, balance the number of cells.
+          if (this->signals.cell_weight.num_slots() == 0)
+            dealii::internal::p4est::functions<dim>::
+            partition (parallel_forest,
+                       /* prepare coarsening */ 1,
+                       /* weight_callback */ NULL);
+          else
+            {
+              // get cell weights for a weighted repartitioning.
+              const std::vector<unsigned int> cell_weights = get_cell_weights();
+
+              PartitionWeights<dim,spacedim> partition_weights (cell_weights);
+
+              // attach (temporarily) a pointer to the cell weights through
+              // p4est's user_pointer object
+              Assert (parallel_forest->user_pointer == this,
+                      ExcInternalError());
+              parallel_forest->user_pointer = &partition_weights;
+
+              dealii::internal::p4est::functions<dim>::
+              partition (parallel_forest,
+                         /* prepare coarsening */ 1,
+                         /* weight_callback */ &PartitionWeights<dim,spacedim>::cell_weight);
+
+              // reset the user pointer to its previous state
+              parallel_forest->user_pointer = this;
+            }
+        }
 
       // finally copy back from local part of tree to deal.II
       // triangulation. before doing so, make sure there are no refine or
@@ -3618,7 +3632,7 @@ namespace parallel
 
     template <int dim, int spacedim>
     void
-    Triangulation<dim,spacedim>::repartition (const std::vector<unsigned int> &cell_weights)
+    Triangulation<dim,spacedim>::repartition ()
     {
       AssertThrow(settings & no_automatic_repartitioning,
                   ExcMessage("You need to set the 'no_automatic_repartitioning' flag in the "
@@ -3641,7 +3655,7 @@ namespace parallel
       // (such as SolutionTransfer data) to the p4est
       attach_mesh_data();
 
-      if (cell_weights.size() == 0)
+      if (this->signals.cell_weight.num_slots() == 0)
         {
           // no cell weights given -- call p4est's 'partition' without a
           // callback for cell weights
@@ -3652,17 +3666,15 @@ namespace parallel
         }
       else
         {
-          AssertDimension (cell_weights.size(), this->n_active_cells());
+          // get cell weights for a weighted repartitioning.
+          const std::vector<unsigned int> cell_weights = get_cell_weights();
+
+          PartitionWeights<dim,spacedim> partition_weights (cell_weights);
 
-          // copy the cell weights into the order in which p4est will
-          // encounter them, then attach (temporarily) a pointer to
-          // this list through p4est's user_pointer object
+          // attach (temporarily) a pointer to the cell weights through p4est's
+          // user_pointer object
           Assert (parallel_forest->user_pointer == this,
                   ExcInternalError());
-          PartitionWeights<dim,spacedim> partition_weights (*this,
-                                                            cell_weights,
-                                                            p4est_tree_to_coarse_cell_permutation,
-                                                            this->my_subdomain);
           parallel_forest->user_pointer = &partition_weights;
 
           dealii::internal::p4est::functions<dim>::
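A sketch of the manual workflow this enables (mesh setup abbreviated; the rebalancing interval and loop structure are invented for the example, and `using namespace dealii;` is assumed):

  void run (const unsigned int n_time_steps)
  {
    parallel::distributed::Triangulation<2>
      tria (MPI_COMM_WORLD,
            Triangulation<2>::none,
            parallel::distributed::Triangulation<2>::no_automatic_repartitioning);
    GridGenerator::hyper_cube (tria);
    tria.refine_global (4);

    for (unsigned int step=0; step<n_time_steps; ++step)
      {
        // ... flag cells for refinement/coarsening ...
        tria.execute_coarsening_and_refinement ();  // refines, does not partition

        if (step % 10 == 0)       // user-defined interval (illustrative)
          tria.repartition ();    // rebalance, using cell_weight if connected
      }
  }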
@@ -4742,6 +4754,57 @@ namespace parallel
       }
     }
 
+    template <int dim, int spacedim>
+    std::vector<unsigned int>
+    Triangulation<dim,spacedim>::
+    get_cell_weights()
+    {
+      // Allocate the space for the weights. In fact, we do not know yet how
+      // many cells we will own after the refinement (only p4est knows that
+      // at this point). We simply reserve n_active_cells space; if many
+      // more cells are refined than coarsened, then additional reallocation
+      // will be done inside get_cell_weights_recursively.
+      std::vector<unsigned int> weights;
+      weights.reserve(this->n_active_cells());
+
+      // Recurse over p4est and the Triangulation
+      // to find refined/coarsened/kept
+      // cells. Then append cell_weight.
+      // Note that we need to follow the p4est ordering
+      // instead of the deal.II ordering to get the cell_weights
+      // in the same order p4est will encounter them during repartitioning.
+      for (unsigned int c=0; c<this->n_cells(0); ++c)
+        {
+          // skip coarse cells that are not ours
+          if (tree_exists_locally<dim,spacedim>(parallel_forest,c) == false)
+            continue;
+
+          const unsigned int coarse_cell_index =
+            p4est_tree_to_coarse_cell_permutation[c];
+
+          const typename Triangulation<dim,spacedim>::cell_iterator
+          dealii_coarse_cell (this, 0, coarse_cell_index);
+
+          typename dealii::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+          dealii::internal::p4est::functions<dim>::
+          quadrant_set_morton (&p4est_coarse_cell,
+                               /*level=*/0,
+                               /*index=*/0);
+          p4est_coarse_cell.p.which_tree = c;
+
+          const typename dealii::internal::p4est::types<dim>::tree *tree =
+            init_tree(coarse_cell_index);
+
+          get_cell_weights_recursively<dim,spacedim>(*tree,
+                                                     dealii_coarse_cell,
+                                                     p4est_coarse_cell,
+                                                     this->signals,
+                                                     weights);
+        }
+
+      return weights;
+    }
+
     template <int dim, int spacedim>
     typename dealii::Triangulation<dim,spacedim>::cell_iterator
     cell_from_quad

diff --git a/tests/mpi/cell_weights_01_back_and_forth_01.cc b/tests/mpi/cell_weights_01_back_and_forth_01.cc
index ef79747e7e..c3befbb39b 100644
--- a/tests/mpi/cell_weights_01_back_and_forth_01.cc
+++ b/tests/mpi/cell_weights_01_back_and_forth_01.cc
@@ -35,6 +35,24 @@
 
 #include <fstream>
 
+unsigned int current_cell_weight;
+
+template <int dim>
+unsigned int
+cell_weight_1(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+              const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  return current_cell_weight++;
+}
+
+template <int dim>
+unsigned int
+cell_weight_2(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+              const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  return 1;
+}
+
 
 template <int dim>
 void test()
@@ -49,20 +67,21 @@ void test()
   GridGenerator::subdivided_hyper_cube(tr, 16);
   tr.refine_global(1);
 
+  current_cell_weight = 1;
+
   // repartition the mesh as described above, first in some arbitrary
   // way, and then with all equal weights
-  {
-    std::vector<unsigned int> weights (tr.n_active_cells());
-    for (unsigned int i=0; i<weights.size(); ++i)
-      weights[i] = i+1;
-    tr.repartition (weights);
-  }
-
-  {
-    std::vector<unsigned int> weights (tr.n_active_cells());
-    for (unsigned int i=0; i<weights.size(); ++i)
-      weights[i] = 1;
-    tr.repartition (weights);
-  }
+  tr.signals.cell_weight.connect(std::bind(&cell_weight_1<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+  tr.repartition();
+
+  tr.signals.cell_weight.disconnect_all_slots();
+
+  tr.signals.cell_weight.connect(std::bind(&cell_weight_2<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+  tr.repartition();
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)

diff --git a/tests/mpi/cell_weights_01_back_and_forth_02.cc b/tests/mpi/cell_weights_01_back_and_forth_02.cc
index 44e82f7b64..6486bb8dd2 100644
--- a/tests/mpi/cell_weights_01_back_and_forth_02.cc
+++ b/tests/mpi/cell_weights_01_back_and_forth_02.cc
@@ -34,6 +34,15 @@
 
 #include <fstream>
 
+unsigned int current_cell_weight;
+
+template <int dim>
+unsigned int
+cell_weight_1(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+              const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  return current_cell_weight++;
+}
 
 template <int dim>
 void test()
@@ -48,17 +57,17 @@ void test()
   GridGenerator::subdivided_hyper_cube(tr, 16);
   tr.refine_global(1);
 
+  current_cell_weight = 1;
+
   // repartition the mesh as described above, first in some arbitrary
   // way, and then with no weights
-  {
-    std::vector<unsigned int> weights (tr.n_active_cells());
-    for (unsigned int i=0; i<weights.size(); ++i)
-      weights[i] = i+1;
-    tr.repartition (weights);
-  }
-
-  tr.repartition ();
+  tr.signals.cell_weight.connect(std::bind(&cell_weight_1<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+  tr.repartition();
+
+  tr.signals.cell_weight.disconnect_all_slots();
+
+  tr.repartition();
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
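The tests above use disconnect_all_slots() to remove the weight function again. An equivalent pattern, sketched here as a fragment against the same test setup (reusing its `tr` and `cell_weight_1`, with dim fixed to 2), is to keep the boost::signals2::connection object and disconnect only that slot:

  boost::signals2::connection weight_connection =
    tr.signals.cell_weight.connect (std::bind (&cell_weight_1<2>,
                                               std_cxx11::_1,
                                               std_cxx11::_2));
  tr.repartition ();               // weighted by cell_weight_1
  weight_connection.disconnect (); // remove only this slot
  tr.repartition ();               // back to balancing cell counts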
diff --git a/tests/mpi/cell_weights_02.cc b/tests/mpi/cell_weights_02.cc
index 2ddea6d472..9be95619af 100644
--- a/tests/mpi/cell_weights_02.cc
+++ b/tests/mpi/cell_weights_02.cc
@@ -34,6 +34,13 @@
 
 #include <fstream>
 
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+            const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  return 100;
+}
 
 template <int dim>
 void test()
@@ -42,15 +49,14 @@ void test()
   unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
 
   parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
-                                               dealii::Triangulation<dim>::none,
-                                               parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+                                               dealii::Triangulation<dim>::none);
 
   GridGenerator::subdivided_hyper_cube(tr, 16);
 
-  tr.refine_global(1);
-
-  // repartition the mesh; attach equal weights to all cells
-  const std::vector<unsigned int> weights (tr.n_active_cells(), 100U);
-  tr.repartition (weights);
+  tr.signals.cell_weight.connect(std::bind(&cell_weight<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+  tr.refine_global(1);
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)

diff --git a/tests/mpi/cell_weights_03.cc b/tests/mpi/cell_weights_03.cc
--- a/tests/mpi/cell_weights_03.cc
+++ b/tests/mpi/cell_weights_03.cc
@@ -34,6 +34,21 @@
 
 #include <fstream>
 
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+            const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  const unsigned int cell_weight = (cell->center()[0] < 0.5
+                                    ||
+                                    cell->center()[1] < 0.5
+                                    ?
+                                    0
+                                    :
+                                    3 * 1000);
+
+  return cell_weight;
+}
 
 template <int dim>
 void test()
@@ -46,21 +61,14 @@ void test()
                                                parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
 
   GridGenerator::subdivided_hyper_cube(tr, 16);
+  tr.refine_global(1);
 
-  // repartition the mesh; attach different weights to all cells
-  std::vector<unsigned int> weights (tr.n_active_cells());
-  for (typename Triangulation<dim>::active_cell_iterator
-       cell = tr.begin_active(); cell != tr.end(); ++cell)
-    weights[cell->active_cell_index()]
-      = (cell->center()[0] < 0.5
-         ||
-         cell->center()[1] < 0.5
-         ?
-         1
-         :
-         4);
-  tr.repartition (weights);
+  tr.signals.cell_weight.connect(std::bind(&cell_weight<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+
+  tr.repartition();
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)
@@ -78,13 +86,8 @@ void test()
   for (typename Triangulation<dim>::active_cell_iterator
        cell = tr.begin_active(); cell != tr.end(); ++cell)
     if (cell->is_locally_owned())
       integrated_weights[myid]
-        += (cell->center()[0] < 0.5
-            ||
-            cell->center()[1] < 0.5
-            ?
-            1
-            :
-            4);
+        += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
+
   Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)
diff --git a/tests/mpi/cell_weights_04.cc b/tests/mpi/cell_weights_04.cc
--- a/tests/mpi/cell_weights_04.cc
+++ b/tests/mpi/cell_weights_04.cc
@@ -34,6 +34,21 @@
 
 #include <fstream>
 
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+            const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  const unsigned int cell_weight = (cell->center()[0] < 0.5
+                                    ||
+                                    cell->center()[1] < 0.5
+                                    ?
+                                    0
+                                    :
+                                    999 * 1000);
+
+  return cell_weight;
+}
 
 template <int dim>
 void test()
@@ -41,25 +55,15 @@ void test()
   unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
 
   parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
-                                               dealii::Triangulation<dim>::none,
-                                               parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+                                               dealii::Triangulation<dim>::none);
 
   GridGenerator::subdivided_hyper_cube(tr, 16);
 
-  tr.refine_global(1);
-
-  // repartition the mesh; attach different weights to all cells
-  std::vector<unsigned int> weights (tr.n_active_cells());
-  for (typename Triangulation<dim>::active_cell_iterator
-       cell = tr.begin_active(); cell != tr.end(); ++cell)
-    weights[cell->active_cell_index()]
-      = (cell->center()[0] < 0.5
-         ||
-         cell->center()[1] < 0.5
-         ?
-         1
-         :
-         1000);
-  tr.repartition (weights);
+  tr.signals.cell_weight.connect(std::bind(&cell_weight<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+
+  tr.refine_global(1);
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)
@@ -80,13 +88,8 @@ void test()
   for (typename Triangulation<dim>::active_cell_iterator
        cell = tr.begin_active(); cell != tr.end(); ++cell)
     if (cell->is_locally_owned())
      integrated_weights[myid]
-        += (cell->center()[0] < 0.5
-            ||
-            cell->center()[1] < 0.5
-            ?
-            1
-            :
-            1000);
+        += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
+
   Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)

diff --git a/tests/mpi/cell_weights_05.cc b/tests/mpi/cell_weights_05.cc
--- a/tests/mpi/cell_weights_05.cc
+++ b/tests/mpi/cell_weights_05.cc
@@ -35,6 +35,26 @@
 
 #include <fstream>
 
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+            const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  return (
+           // bottom left corner
+           (cell->center()[0] < 1)
+           &&
+           (cell->center()[1] < 1)
+           &&
+           (dim == 3 ?
+            (cell->center()[2] < 1) :
+            true)
+           ?
+           // one cell has more weight than all others together
+           std::pow(16,dim) * 1000
+           :
+           0);
+}
 
 template <int dim>
 void test()
@@ -59,25 +79,11 @@ void test()
   tr.refine_global (1);
 
   // repartition the mesh; attach different weights to all cells
-  std::vector<unsigned int> weights (tr.n_active_cells());
-  for (typename Triangulation<dim>::active_cell_iterator
-       cell = tr.begin_active(); cell != tr.end(); ++cell)
-    weights[cell->active_cell_index()]
-      = (
-          // bottom left corner
-          (cell->center()[0] < 1)
-          &&
-          (cell->center()[1] < 1)
-          &&
-          (dim == 3 ?
-           (cell->center()[2] < 1) :
-           true)
-          ?
-          // one cell has more weight than all others together
-          tr.n_global_active_cells()
-          :
-          1);
-  tr.repartition (weights);
+  tr.signals.cell_weight.connect(std::bind(&cell_weight<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+
+  tr.repartition ();
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)
@@ -96,19 +102,8 @@ void test()
   for (typename Triangulation<dim>::active_cell_iterator
        cell = tr.begin_active(); cell != tr.end(); ++cell)
     if (cell->is_locally_owned())
       integrated_weights[myid]
-        += (
-             // bottom left corner
-             (cell->center()[0] < 1)
-             &&
-             (cell->center()[1] < 1)
-             &&
-             (dim == 3 ?
-              (cell->center()[2] < 1) :
-              true)
-             ?
-             tr.n_global_active_cells()
-             :
-             1);
+        += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
   Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)
diff --git a/tests/mpi/cell_weights_06.cc b/tests/mpi/cell_weights_06.cc
--- a/tests/mpi/cell_weights_06.cc
+++ b/tests/mpi/cell_weights_06.cc
@@ -34,6 +34,29 @@
 
 #include <fstream>
 
+unsigned int n_global_active_cells;
+
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+            const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+  return (
+           // bottom left corner
+           (cell->center()[0] < 1)
+           &&
+           (cell->center()[1] < 1)
+           &&
+           (dim == 3 ?
+            (cell->center()[2] < 1) :
+            true)
+           ?
+           // one cell has more weight than all others together
+           n_global_active_cells * 1000
+           :
+           0);
+}
 
 template <int dim>
 void test()
@@ -56,25 +78,11 @@ void test()
   tr.refine_global (1);
 
   // repartition the mesh; attach different weights to all cells
-  std::vector<unsigned int> weights (tr.n_active_cells());
-  for (typename Triangulation<dim>::active_cell_iterator
-       cell = tr.begin_active(); cell != tr.end(); ++cell)
-    weights[cell->active_cell_index()]
-      = (
-          // bottom left corner
-          (cell->center()[0] < 1)
-          &&
-          (cell->center()[1] < 1)
-          &&
-          (dim == 3 ?
-           (cell->center()[2] < 1) :
-           true)
-          ?
-          // one cell has more weight than all others together
-          tr.n_global_active_cells()
-          :
-          1);
-  tr.repartition (weights);
+  n_global_active_cells = tr.n_global_active_cells();
+  tr.signals.cell_weight.connect(std::bind(&cell_weight<dim>,
+                                           std_cxx11::_1,
+                                           std_cxx11::_2));
+  tr.repartition ();
 
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)
@@ -93,19 +101,8 @@ void test()
   for (typename Triangulation<dim>::active_cell_iterator
        cell = tr.begin_active(); cell != tr.end(); ++cell)
     if (cell->is_locally_owned())
       integrated_weights[myid]
-        += (
-             // bottom left corner
-             (cell->center()[0] < 1)
-             &&
-             (cell->center()[1] < 1)
-             &&
-             (dim == 3 ?
-              (cell->center()[2] < 1) :
-              true)
-             ?
-             tr.n_global_active_cells()
-             :
-             1);
+        += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
   Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     for (unsigned int p=0; p<numproc; ++p)