*/
typedef typename dealii::Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
+ typedef typename dealii::Triangulation<dim,spacedim>::CellStatus CellStatus;
+
/**
* Configuration flags for distributed Triangulations to be set in the
* constructor. Settings can be combined using bitwise OR.
* affected by flags set on locally owned cells.
*
* @note This function by default partitions the mesh in such a way
- * that the number of cells on all processors is roughly equal,
- * i.e., it is not possible to "weigh" cells that are more expensive
- * to compute on than others. This is because these weights would apply
- * to the current set of cells, which will then be refined and
- * coarsened into a separate set of cells, which will only then be
- * partitioned between processors. In other words, the weights for
- * cells you could attach before calling this function will no
- * longer be the set of cells that will ultimately be partitioned.
- * If you want to set weights for partitioning, you need to create
- * your triangulation object by passing the
+ * that the number of cells on all processors is roughly equal.
+ * If you want to set weights for partitioning, e.g. because some cells
+ * are more expensive to compute than others, you can use the
+ * cell_weight signal as documented in the dealii::Triangulation class.
+ * This function will check whether a function is connected to the signal
+ * and, if so, use it. If you prefer to repartition the mesh yourself at
+ * user-defined intervals only, you can create your triangulation
+ * object by passing the
* parallel::distributed::Triangulation::no_automatic_repartitioning
* flag to the constructor, which ensures that calling the current
* function only refines and coarsens the triangulation, but doesn't
- * partition it. You would then call the current function, in a second
- * step compute and use cell weights when partitioning the mesh upon
- * calling repartition() with a non-default argument.
+ * partition it. You can then call the repartition() function manually.
+ * The usage of the cell_weight signal is identical in both cases:
+ * if a function is connected to the signal, it will be used to balance
+ * the calculated weights; otherwise the number of cells is balanced.
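+ *
+ * As an illustration only (not a verbatim excerpt from the library
+ * documentation), connecting a weight function and triggering a manual
+ * repartitioning could look like the following sketch, where
+ * my_cell_weight() is a hypothetical user function matching the
+ * signature of the cell_weight signal and mpi_communicator is your
+ * MPI communicator:
+ * @code
+ *   parallel::distributed::Triangulation<dim>
+ *     triangulation (mpi_communicator,
+ *                    dealii::Triangulation<dim>::none,
+ *                    parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+ *
+ *   triangulation.signals.cell_weight.connect (std_cxx11::bind (&my_cell_weight<dim>,
+ *                                                               std_cxx11::_1,
+ *                                                               std_cxx11::_2));
+ *
+ *   // refine and coarsen as usual; because of the flag above this call does
+ *   // not repartition, so rebalance manually using the cell weights:
+ *   triangulation.execute_coarsening_and_refinement ();
+ *   triangulation.repartition ();
+ * @endcode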
*/
virtual void execute_coarsening_and_refinement ();
* the same way as execute_coarsening_and_refinement() with respect to
* dealing with data movement (SolutionTransfer, etc.).
*
- * @param weights_per_cell A vector of weights that indicates how
- * "expensive" computations are on each of the active cells. If
- * left at its default value, this function will partition the
- * mesh in such a way that it has roughly equal numbers of cells
- * on all processors. If the argument is specified, then the mesh
- * is partitioned so that the sum of weights of the cells owned
- * by each processor is roughly equal. The size of the vector
- * needs to equal the number of active cells of the current
- * triangulation (i.e., must be equal to
- * <code>triangulation.n_active_cells()</code>) and be in the order
- * in which one encounters these cells in a loop over all cells (in
- * the same way as vectors of refinement indicators are interpreted
- * in namespace GridRefinement). In other words, the element of this
- * vector that corresponds to a given cell has index
- * <code>cell-@>active_cell_index()</code>. Of the elements of this
- * vector, only those that correspond to <i>locally owned</i>
- * active cells are actually considered. You can set the rest
- * to arbitrary values.
- *
- * @note The only requirement on the weights is that every cell's
- * weight is positive and that the sum over all weights on all
- * processors can be formed using a 64-bit integer. Beyond that,
- * it is your choice how you want to interpret the weights. A
- * common approach is to consider the weights proportional to
+ * @note If no function is connected to the cell_weight signal described
+ * in the dealii::Triangulation class, this function will balance the
+ * number of cells on each processor. If one or more functions are
+ * connected, it will calculate the sum of their return values for each
+ * cell and balance these weights across processors. The only
+ * requirement on the weights is
+ * that every cell's weight is positive and that the sum over all
+ * weights on all processors can be formed using a 64-bit integer.
+ * Beyond that, it is your choice how you want to interpret the weights.
+ * A common approach is to consider the weights proportional to
* the cost of doing computations on a cell, e.g., by summing
* the time for assembly and solving. In practice, determining
* this cost is of course not trivial since we don't solve on
* (such as forming boundary integrals during the assembly
* only on cells that are actually at the boundary, or computing
* expensive nonlinear terms only on some cells but not others,
- * e.g., in the elasto-plastic problem in step-42). In any
- * case, the scaling does not matter: you can choose the weights
- * equal to the number of unknowns on each cell, or equal to ten
- * times the number of unknowns -- because only their relative
- * size matters in partitioning, both choices will lead to the
- * same mesh.
+ * e.g., in the elasto-plastic problem in step-42).
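+ *
+ * Purely as a sketch (the material_id()-based predicate is a made-up
+ * stand-in for whatever makes a cell expensive in your application),
+ * a weight function connected to the cell_weight signal could look
+ * like:
+ * @code
+ *   template <int dim>
+ *   unsigned int
+ *   estimated_cell_weight (const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ *                          const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+ *   {
+ *     // assumption: cells with material_id()==1 carry an expensive
+ *     // nonlinear term and cost roughly five times the base weight
+ *     return (cell->material_id() == 1 ? 5 * 1000 : 0);
+ *   }
+ * @endcode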
*/
- void repartition (const std::vector<unsigned int> &weights_per_cell
- = std::vector<unsigned int>());
+ void repartition ();
/**
* When vertices have been moved locally, for example using code like
void load(const char *filename,
const bool autopartition = true);
- /**
- * Used to inform in the callbacks of register_data_attach() and
- * notify_ready_to_unpack() how the cell with the given cell_iterator is
- * going to change. Note that this may me different than the
- * refine_flag() and coarsen_flag() in the cell_iterator because of
- * refinement constraints that this machine does not see.
- */
- enum CellStatus
- {
- /**
- * The cell will not be refined or coarsened and might or might not
- * move to a different processor.
- */
- CELL_PERSIST,
- /**
- * The cell will be or was refined.
- */
- CELL_REFINE,
- /**
- * The children of this cell will be or were coarsened into this cell.
- */
- CELL_COARSEN,
- /**
- * Invalid status. Will not occur for the user.
- */
- CELL_INVALID
- };
-
/**
* Register a function with the current Triangulation object that will
* be used to attach data to active cells before
(const std::vector<GridTools::PeriodicFacePair<cell_iterator> > &);
-
private:
/**
*/
void attach_mesh_data();
+ /**
+ * Internal function notifying all registered slots to provide their
+ * weights before repartitioning occurs. Called from
+ * execute_coarsening_and_refinement() and repartition().
+ *
+ * @return A vector of unsigned integers representing the weight or
+ * computational load of every cell after the refinement/coarsening/
+ * repartition cycle. Note that the number of entries does not need to
+ * be equal to either n_active_cells() or n_locally_owned_active_cells(),
+ * because the triangulation is not updated yet. The weights are sorted
+ * in the order in which p4est will encounter them while iterating over
+ * the cells.
+ */
+ std::vector<unsigned int>
+ get_cell_weights();
+
/**
* Fills a map that, for each vertex, lists all the processors whose
* subdomains are adjacent to that vertex. Used by
* @{
*/
+
+ /**
+ * Used to inform functions in derived classes how the cell with the given
+ * cell_iterator is going to change. Note that this may be different from
+ * the refine_flag() and coarsen_flag() in the cell_iterator in parallel
+ * calculations because of refinement constraints that this machine does not
+ * see.
+ */
+ enum CellStatus
+ {
+ /**
+ * The cell will not be refined or coarsened and might or might not
+ * move to a different processor.
+ */
+ CELL_PERSIST,
+ /**
+ * The cell will be or was refined.
+ */
+ CELL_REFINE,
+ /**
+ * The children of this cell will be or were coarsened into this cell.
+ */
+ CELL_COARSEN,
+ /**
+ * Invalid status. Will not occur for the user.
+ */
+ CELL_INVALID
+ };
+
+ /**
+ * A structure used to accumulate the results of the slot functions
+ * connected to the cell_weight signal below. It takes an iterator range
+ * and returns the sum of values.
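+ *
+ * Purely as an illustrative sketch (f() and g() are hypothetical example
+ * slots), a signal declared with this combiner returns the sum of the
+ * return values of all connected slots:
+ * @code
+ *   unsigned int f () { return 2; }
+ *   unsigned int g () { return 3; }
+ *
+ *   boost::signals2::signal<unsigned int (), sum<unsigned int> > signal;
+ *   signal.connect (&f);
+ *   signal.connect (&g);
+ *   const unsigned int total = signal();   // total == 5
+ * @endcode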
+ */
+ template<typename T>
+ struct sum
+ {
+ typedef T result_type;
+
+ template<typename InputIterator>
+ T operator()(InputIterator first, InputIterator last) const
+ {
+ // If there are no slots to call, just return the
+ // default-constructed value
+ if (first == last)
+ return T();
+
+ T sum = *first++;
+ while (first != last)
+ {
+ sum += *first++;
+ }
+
+ return sum;
+ }
+ };
+
/**
* A structure that has boost::signal objects for a number of actions that a
* triangulation can do to itself. Please refer to the
* @p post_refinement_on_cell are not connected to this signal.
*/
boost::signals2::signal<void ()> any_change;
+
+ /**
+ * This signal is triggered for each cell during every automatic or manual
+ * repartitioning. It is somewhat special in that it is only triggered for
+ * distributed parallel calculations and only if functions are connected
+ * to it. It is intended to allow a weighted repartitioning of the domain
+ * that balances the computational load across processes in a different
+ * way than balancing the number of cells.
+ * Any connected function is expected to take an iterator to a cell, and a
+ * CellStatus argument that indicates whether this cell is going to be refined,
+ * coarsened or left untouched (see the documentation of the CellStatus enum
+ * for more information). The function is expected to return an unsigned
+ * integer, which is interpreted as the additional computational load of this
+ * cell. If this cell is going to be coarsened, the signal is called for the
+ * parent cell and you need to provide the weight of the future parent
+ * cell. If this cell is going to be refined, the function should return a
+ * weight, which will be equally assigned to every future child
+ * cell of the current cell. As a reference, a base value of 1000 is added
+ * for every cell to the total weight, so a signal return value of 1000
+ * (resulting in a total weight of 2000) means that this particular cell is
+ * twice as expensive for a process to handle. If several functions are
+ * connected to this signal, their return values will be summed to calculate
+ * the final weight.
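+ *
+ * For illustration, a connected slot that distinguishes between the
+ * possible CellStatus values might look like the following sketch;
+ * the name my_cell_weight and the cost heuristic (boundary cells being
+ * twice as expensive as interior cells) are made-up placeholders:
+ * @code
+ *   template <int dim>
+ *   unsigned int
+ *   my_cell_weight (const typename Triangulation<dim>::cell_iterator &cell,
+ *                   const typename Triangulation<dim>::CellStatus     status)
+ *   {
+ *     // additional cost on top of the base weight of 1000
+ *     const unsigned int extra_cost = (cell->at_boundary() ? 1000 : 0);
+ *
+ *     switch (status)
+ *       {
+ *       case Triangulation<dim>::CELL_PERSIST:
+ *         return extra_cost;
+ *       case Triangulation<dim>::CELL_REFINE:
+ *         // this value will be assigned to each future child cell
+ *         return extra_cost;
+ *       case Triangulation<dim>::CELL_COARSEN:
+ *         // 'cell' here is the future parent cell
+ *         return extra_cost;
+ *       default:
+ *         return 0;
+ *       }
+ *   }
+ * @endcode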
+ */
+ boost::signals2::signal<unsigned int (const cell_iterator &,
+ const CellStatus),
+ Triangulation<dim,spacedim>::sum<unsigned int> > cell_weight;
};
/**
quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree *>(&tree),
&p4est_cell)
== false))
- return; //this quadrant and none of it's childs belongs to us.
+ return; //this quadrant and none of its children belongs to us.
bool p4est_has_children = (idx == -1);
ptr);
}
- //mark other childs as invalid, so that unpack only happens once
+ //mark other children as invalid, so that unpack only happens once
for (unsigned int i=1; i<GeometryInfo<dim>::max_children_per_cell; ++i)
{
int child_idx = sc_array_bsearch(const_cast<sc_array_t *>(&tree.quadrants),
}
else
{
- //it's children got coarsened into this cell
+ //its children got coarsened into this cell
typename internal::p4est::types<dim>::quadrant *q;
q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), idx)
}
}
+ template <int dim, int spacedim>
+ void
+ get_cell_weights_recursively (const typename internal::p4est::types<dim>::tree &tree,
+ const typename Triangulation<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+ const typename Triangulation<dim,spacedim>::Signals &signals,
+ std::vector<unsigned int> &weight)
+ {
+ const int idx = sc_array_bsearch(const_cast<sc_array_t *>(&tree.quadrants),
+ &p4est_cell,
+ internal::p4est::functions<dim>::quadrant_compare);
+
+ if (idx == -1 && (internal::p4est::functions<dim>::
+ quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree *>(&tree),
+ &p4est_cell)
+ == false))
+ return; // This quadrant and none of its children belongs to us.
+
+ const bool p4est_has_children = (idx == -1);
+
+ if (p4est_has_children && dealii_cell->has_children())
+ {
+ //recurse further
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&p4est_cell, p4est_child);
+
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ {
+ get_cell_weights_recursively<dim,spacedim> (tree,
+ dealii_cell->child(c),
+ p4est_child[c],
+ signals,
+ weight);
+ }
+ }
+ else if (!p4est_has_children && !dealii_cell->has_children())
+ {
+ // This active cell didn't change
+ weight.push_back(1000);
+ weight.back() += signals.cell_weight(dealii_cell,
+ parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST);
+ }
+ else if (p4est_has_children)
+ {
+ // This cell will be refined
+ unsigned int parent_weight(1000);
+ parent_weight += signals.cell_weight(dealii_cell,
+ parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE);
+
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ {
+ // We assign the weight of the parent cell equally to all children
+ weight.push_back(parent_weight);
+ }
+ }
+ else
+ {
+ // This cell's children will be coarsened into this cell
+ weight.push_back(1000);
+ weight.back() += signals.cell_weight(dealii_cell,
+ parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN);
+ }
+ }
+
template <int dim, int spacedim>
void
quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree *>(&tree),
&p4est_cell)
== false))
- // this quadrant and none of it's children belong to us.
+ // this quadrant and none of its children belong to us.
return;
class PartitionWeights
{
public:
- PartitionWeights (const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
- const std::vector<unsigned int> &cell_weights,
- const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
- const types::subdomain_id my_subdomain);
+ /**
+ * This constructor assumes the cell_weights are already sorted in the
+ * order in which p4est will encounter the cells, and that they do not
+ * contain entries for ghost or artificial cells.
+ */
+ PartitionWeights (const std::vector<unsigned int> &cell_weights);
/**
* A callback function that we pass to the p4est data structures when a
private:
std::vector<unsigned int> cell_weights_list;
std::vector<unsigned int>::const_iterator current_pointer;
-
- /**
- * Recursively go through the cells of the p4est and deal.II triangulation
- * and collect the partitioning weights.
- */
- void
- build_weight_list (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const typename internal::p4est::types<dim>::quadrant &p4est_cell,
- const types::subdomain_id my_subdomain,
- const std::vector<unsigned int> &cell_weights);
};
template <int dim, int spacedim>
PartitionWeights<dim,spacedim>::
- PartitionWeights (const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
- const std::vector<unsigned int> &cell_weights,
- const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
- const types::subdomain_id my_subdomain)
+ PartitionWeights (const std::vector<unsigned int> &cell_weights)
+ :
+ cell_weights_list(cell_weights)
{
- Assert (cell_weights.size() == triangulation.n_active_cells(), ExcInternalError());
-
- // build the cell_weights_list as an array over all locally owned
- // active cells (i.e., a subset of the weights provided by the user, which
- // are for all active cells), in the order in which p4est will encounter them
- cell_weights_list.reserve (triangulation.n_locally_owned_active_cells());
- for (unsigned int c=0; c<triangulation.n_cells(0); ++c)
- {
- unsigned int coarse_cell_index =
- p4est_tree_to_coarse_cell_permutation[c];
-
- const typename Triangulation<dim,spacedim>::cell_iterator
- cell (&triangulation, 0, coarse_cell_index);
-
- typename internal::p4est::types<dim>::quadrant p4est_cell;
- internal::p4est::functions<dim>::
- quadrant_set_morton (&p4est_cell,
- /*level=*/0,
- /*index=*/0);
- p4est_cell.p.which_tree = c;
- build_weight_list (cell, p4est_cell, my_subdomain,
- cell_weights);
- }
-
- // ensure that we built the list right
- Assert(cell_weights_list.size() == triangulation.n_locally_owned_active_cells(),
- ExcInternalError());
-
// set the current pointer to the first element of the list, given that
// we will walk through it sequentially
current_pointer = cell_weights_list.begin();
}
- template <int dim, int spacedim>
- void
- PartitionWeights<dim,spacedim>::
- build_weight_list (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const typename internal::p4est::types<dim>::quadrant &p4est_cell,
- const types::subdomain_id my_subdomain,
- const std::vector<unsigned int> &cell_weights)
- {
- if (!cell->has_children())
- {
- if (cell->subdomain_id() == my_subdomain)
- cell_weights_list.push_back (cell_weights[cell->active_cell_index()]);
- }
- else
- {
- typename internal::p4est::types<dim>::quadrant
- p4est_child[GeometryInfo<dim>::max_children_per_cell];
- for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
- switch (dim)
- {
- case 2:
- P4EST_QUADRANT_INIT(&p4est_child[c]);
- break;
- case 3:
- P8EST_QUADRANT_INIT(&p4est_child[c]);
- break;
- default:
- Assert (false, ExcNotImplemented());
- }
- internal::p4est::functions<dim>::
- quadrant_childrenv (&p4est_cell,
- p4est_child);
- for (unsigned int c=0;
- c<GeometryInfo<dim>::max_children_per_cell; ++c)
- {
- p4est_child[c].p.which_tree = p4est_cell.p.which_tree;
- build_weight_list (cell->child(c),
- p4est_child[c],
- my_subdomain,
- cell_weights);
- }
- }
- }
-
-
template <int dim, int spacedim>
int
PartitionWeights<dim,spacedim>::
// get the weight, increment the pointer, and return the weight
return *this_object->current_pointer++;
}
-
}
// number of CPUs and so everything works without this call, but
// this command changes the distribution for some reason, so we
// will leave it in here.
- dealii::internal::p4est::functions<dim>::
- partition (parallel_forest,
- /* prepare coarsening */ 1,
- /* weight_callback */ NULL);
+ repartition();
try
{
balance_type(P8EST_CONNECT_FULL)),
/*init_callback=*/NULL);
-
// before repartitioning the mesh let others attach mesh related info
// (such as SolutionTransfer data) to the p4est
attach_mesh_data();
- // partition the new mesh between all processors. we cannot
- // use weights for each cell here because the cells have changed
- // from the time the user has called execute_c_and_r due to the
- // mesh refinement and coarsening, and the user has not had time
- // to attach cell weights yet
if (!(settings & no_automatic_repartitioning))
- dealii::internal::p4est::functions<dim>::
- partition (parallel_forest,
- /* prepare coarsening */ 1,
- /* weight_callback */ NULL);
+ {
+ // partition the new mesh between all processors. If no cell weights
+ // have been given, balance the number of cells.
+ if (this->signals.cell_weight.num_slots() == 0)
+ dealii::internal::p4est::functions<dim>::
+ partition (parallel_forest,
+ /* prepare coarsening */ 1,
+ /* weight_callback */ NULL);
+ else
+ {
+ // get cell weights for a weighted repartitioning.
+ const std::vector<unsigned int> cell_weights = get_cell_weights();
+
+ PartitionWeights<dim,spacedim> partition_weights (cell_weights);
+
+ // attach (temporarily) a pointer to the cell weights through p4est's
+ // user_pointer object
+ Assert (parallel_forest->user_pointer == this,
+ ExcInternalError());
+ parallel_forest->user_pointer = &partition_weights;
+
+ dealii::internal::p4est::functions<dim>::
+ partition (parallel_forest,
+ /* prepare coarsening */ 1,
+ /* weight_callback */ &PartitionWeights<dim,spacedim>::cell_weight);
+
+ // reset the user pointer to its previous state
+ parallel_forest->user_pointer = this;
+ }
+ }
// finally copy back from local part of tree to deal.II
// triangulation. before doing so, make sure there are no refine or
template <int dim, int spacedim>
void
- Triangulation<dim,spacedim>::repartition (const std::vector<unsigned int> &cell_weights)
+ Triangulation<dim,spacedim>::repartition ()
{
AssertThrow(settings & no_automatic_repartitioning,
ExcMessage("You need to set the 'no_automatic_repartitioning' flag in the "
// (such as SolutionTransfer data) to the p4est
attach_mesh_data();
- if (cell_weights.size() == 0)
+ if (this->signals.cell_weight.num_slots() == 0)
{
// no cell weights given -- call p4est's 'partition' without a
// callback for cell weights
}
else
{
- AssertDimension (cell_weights.size(), this->n_active_cells());
+ // get cell weights for a weighted repartitioning.
+ const std::vector<unsigned int> cell_weights = get_cell_weights();
+
+ PartitionWeights<dim,spacedim> partition_weights (cell_weights);
- // copy the cell weights into the order in which p4est will
- // encounter them, then attach (temporarily) a pointer to
- // this list through p4est's user_pointer object
+ // attach (temporarily) a pointer to the cell weights through p4est's
+ // user_pointer object
Assert (parallel_forest->user_pointer == this,
ExcInternalError());
- PartitionWeights<dim,spacedim> partition_weights (*this,
- cell_weights,
- p4est_tree_to_coarse_cell_permutation,
- this->my_subdomain);
parallel_forest->user_pointer = &partition_weights;
dealii::internal::p4est::functions<dim>::
}
}
+ template <int dim, int spacedim>
+ std::vector<unsigned int>
+ Triangulation<dim,spacedim>::
+ get_cell_weights()
+ {
+ // Allocate the space for the weights. In fact we do not know yet how
+ // many cells we own after the refinement (only p4est knows that
+ // at this point). We simply reserve n_active_cells space; if many
+ // more cells are refined than coarsened, additional reallocation
+ // will be done inside get_cell_weights_recursively.
+ std::vector<unsigned int> weights;
+ weights.reserve(this->n_active_cells());
+
+ // Recurse over p4est and Triangulation
+ // to find refined/coarsened/kept
+ // cells. Then append cell_weight.
+ // Note that we need to follow the p4est ordering
+ // instead of the deal.II ordering to get the cell_weights
+ // in the same order p4est will encounter them during repartitioning.
+ for (unsigned int c=0; c<this->n_cells(0); ++c)
+ {
+ // skip coarse cells that are not ours
+ if (tree_exists_locally<dim,spacedim>(parallel_forest,c) == false)
+ continue;
+
+ const unsigned int coarse_cell_index =
+ p4est_tree_to_coarse_cell_permutation[c];
+
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ dealii_coarse_cell (this, 0, coarse_cell_index);
+
+ typename dealii::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ dealii::internal::p4est::functions<dim>::
+ quadrant_set_morton (&p4est_coarse_cell,
+ /*level=*/0,
+ /*index=*/0);
+ p4est_coarse_cell.p.which_tree = c;
+
+ const typename dealii::internal::p4est::types<dim>::tree *tree =
+ init_tree(coarse_cell_index);
+
+ get_cell_weights_recursively<dim,spacedim>(*tree,
+ dealii_coarse_cell,
+ p4est_coarse_cell,
+ this->signals,
+ weights);
+ }
+
+ return weights;
+ }
+
template <int dim, int spacedim>
typename dealii::Triangulation<dim,spacedim>::cell_iterator
cell_from_quad
#include <fstream>
+unsigned int current_cell_weight;
+
+template <int dim>
+unsigned int
+cell_weight_1(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ return current_cell_weight++;
+}
+
+template <int dim>
+unsigned int
+cell_weight_2(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ return 1;
+}
+
template<int dim>
void test()
GridGenerator::subdivided_hyper_cube(tr, 16);
tr.refine_global(1);
+ current_cell_weight = 1;
+
// repartition the mesh as described above, first in some arbitrary
// way, and then with all equal weights
- {
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (unsigned int i=0; i<weights.size(); ++i)
- weights[i] = i+1;
- tr.repartition (weights);
- }
- {
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (unsigned int i=0; i<weights.size(); ++i)
- weights[i] = 1;
- tr.repartition (weights);
- }
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight_1<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+ tr.repartition();
+
+ tr.signals.cell_weight.disconnect_all_slots();
+
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight_2<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+ tr.repartition();
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
#include <fstream>
+unsigned int current_cell_weight;
+
+template <int dim>
+unsigned int
+cell_weight_1(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ return current_cell_weight++;
+}
template<int dim>
void test()
GridGenerator::subdivided_hyper_cube(tr, 16);
tr.refine_global(1);
+ current_cell_weight = 1;
+
// repartition the mesh as described above, first in some arbitrary
// way, and then with no weights
- {
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (unsigned int i=0; i<weights.size(); ++i)
- weights[i] = i+1;
- tr.repartition (weights);
- }
- {
- tr.repartition ();
- }
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight_1<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+ tr.repartition();
+
+ tr.signals.cell_weight.disconnect_all_slots();
+ tr.repartition();
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
#include <fstream>
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ return 100;
+}
template<int dim>
void test()
unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
- dealii::Triangulation<dim>::none,
- parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+ dealii::Triangulation<dim>::none);
GridGenerator::subdivided_hyper_cube(tr, 16);
- tr.refine_global(1);
- // repartition the mesh; attach equal weights to all cells
- const std::vector<unsigned int> weights (tr.n_active_cells(), 100U);
- tr.repartition (weights);
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+ tr.refine_global(1);
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
#include <fstream>
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ const unsigned int cell_weight = (cell->center()[0] < 0.5
+ ||
+ cell->center()[1] < 0.5
+ ?
+ 0
+ :
+ 3 * 1000);
+
+ return cell_weight;
+}
template<int dim>
void test()
parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
GridGenerator::subdivided_hyper_cube(tr, 16);
+
tr.refine_global(1);
- // repartition the mesh; attach different weights to all cells
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (typename Triangulation<dim>::active_cell_iterator
- cell = tr.begin_active(); cell != tr.end(); ++cell)
- weights[cell->active_cell_index()]
- = (cell->center()[0] < 0.5
- ||
- cell->center()[1] < 0.5
- ?
- 1
- :
- 4);
- tr.repartition (weights);
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+
+ tr.repartition();
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
cell = tr.begin_active(); cell != tr.end(); ++cell)
if (cell->is_locally_owned())
integrated_weights[myid]
- += (cell->center()[0] < 0.5
- ||
- cell->center()[1] < 0.5
- ?
- 1
- :
- 4);
+ += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
+
Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
DEAL:2d::processor 2: 128 locally owned active cells
DEAL:2d::processor 3: 92 locally owned active cells
DEAL:2d::processor 4: 88 locally owned active cells
-DEAL:2d::processor 0: 360.000 weight
-DEAL:2d::processor 1: 356.000 weight
-DEAL:2d::processor 2: 356.000 weight
-DEAL:2d::processor 3: 368.000 weight
-DEAL:2d::processor 4: 352.000 weight
+DEAL:2d::processor 0: 360000. weight
+DEAL:2d::processor 1: 356000. weight
+DEAL:2d::processor 2: 356000. weight
+DEAL:2d::processor 3: 368000. weight
+DEAL:2d::processor 4: 352000. weight
DEAL:3d::processor 0: 11472 locally owned active cells
DEAL:3d::processor 1: 3480 locally owned active cells
DEAL:3d::processor 2: 7168 locally owned active cells
DEAL:3d::processor 3: 7784 locally owned active cells
DEAL:3d::processor 4: 2864 locally owned active cells
-DEAL:3d::processor 0: 11472.0 weight
-DEAL:3d::processor 1: 11472.0 weight
-DEAL:3d::processor 2: 11464.0 weight
-DEAL:3d::processor 3: 11480.0 weight
-DEAL:3d::processor 4: 11456.0 weight
+DEAL:3d::processor 0: 1.14720e+07 weight
+DEAL:3d::processor 1: 1.14720e+07 weight
+DEAL:3d::processor 2: 1.14640e+07 weight
+DEAL:3d::processor 3: 1.14800e+07 weight
+DEAL:3d::processor 4: 1.14560e+07 weight
// just create a 16x16 coarse mesh, refine it once, and partition it
-//
// like _03, but with a larger spread of weights
#include "../tests.h"
#include <fstream>
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ const unsigned int cell_weight = (cell->center()[0] < 0.5
+ ||
+ cell->center()[1] < 0.5
+ ?
+ 0
+ :
+ 999 * 1000);
+
+ return cell_weight;
+}
template<int dim>
void test()
unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
- dealii::Triangulation<dim>::none,
- parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+ dealii::Triangulation<dim>::none);
GridGenerator::subdivided_hyper_cube(tr, 16);
- tr.refine_global(1);
- // repartition the mesh; attach different weights to all cells
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (typename Triangulation<dim>::active_cell_iterator
- cell = tr.begin_active(); cell != tr.end(); ++cell)
- weights[cell->active_cell_index()]
- = (cell->center()[0] < 0.5
- ||
- cell->center()[1] < 0.5
- ?
- 1
- :
- 1000);
- tr.repartition (weights);
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+
+ tr.refine_global(1);
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
cell = tr.begin_active(); cell != tr.end(); ++cell)
if (cell->is_locally_owned())
integrated_weights[myid]
- += (cell->center()[0] < 0.5
- ||
- cell->center()[1] < 0.5
- ?
- 1
- :
- 1000);
+ += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
+
Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
DEAL:2d::processor 2: 52 locally owned active cells
DEAL:2d::processor 3: 48 locally owned active cells
DEAL:2d::processor 4: 52 locally owned active cells
-DEAL:2d::processor 0: 52768.0 weight
-DEAL:2d::processor 1: 52000.0 weight
-DEAL:2d::processor 2: 52000.0 weight
-DEAL:2d::processor 3: 48000.0 weight
-DEAL:2d::processor 4: 52000.0 weight
+DEAL:2d::processor 0: 5.27680e+07 weight
+DEAL:2d::processor 1: 5.20000e+07 weight
+DEAL:2d::processor 2: 5.20000e+07 weight
+DEAL:2d::processor 3: 4.80000e+07 weight
+DEAL:2d::processor 4: 5.20000e+07 weight
DEAL:3d::processor 0: 13920 locally owned active cells
DEAL:3d::processor 1: 1640 locally owned active cells
DEAL:3d::processor 2: 13920 locally owned active cells
DEAL:3d::processor 3: 1648 locally owned active cells
DEAL:3d::processor 4: 1640 locally owned active cells
-DEAL:3d::processor 0: 1.64429e+06 weight
-DEAL:3d::processor 1: 1.64000e+06 weight
-DEAL:3d::processor 2: 1.64429e+06 weight
-DEAL:3d::processor 3: 1.64800e+06 weight
-DEAL:3d::processor 4: 1.64000e+06 weight
+DEAL:3d::processor 0: 1.64429e+09 weight
+DEAL:3d::processor 1: 1.64000e+09 weight
+DEAL:3d::processor 2: 1.64429e+09 weight
+DEAL:3d::processor 3: 1.64800e+09 weight
+DEAL:3d::processor 4: 1.64000e+09 weight
#include <fstream>
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ return (
+ // bottom left corner
+ (cell->center()[0] < 1)
+ &&
+ (cell->center()[1] < 1)
+ &&
+ (dim == 3 ?
+ (cell->center()[2] < 1) :
+ true)
+ ?
+ // one cell has more weight than all others together
+ std::pow(16,dim) * 1000
+ :
+ 0);
+}
template<int dim>
void test()
// repartition the mesh; attach different weights to all cells
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (typename Triangulation<dim>::active_cell_iterator
- cell = tr.begin_active(); cell != tr.end(); ++cell)
- weights[cell->active_cell_index()]
- = (
- // bottom left corner
- (cell->center()[0] < 1)
- &&
- (cell->center()[1] < 1)
- &&
- (dim == 3 ?
- (cell->center()[2] < 1) :
- true)
- ?
- // one cell has more weight than all others together
- tr.n_global_active_cells()
- :
- 1);
- tr.repartition (weights);
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+
+ tr.repartition ();
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
cell = tr.begin_active(); cell != tr.end(); ++cell)
if (cell->is_locally_owned())
integrated_weights[myid]
- += (
- // bottom left corner
- (cell->center()[0] < 1)
- &&
- (cell->center()[1] < 1)
- &&
- (dim == 3 ?
- (cell->center()[2] < 1) :
- true)
- ?
- tr.n_global_active_cells()
- :
- 1);
+ += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
DEAL:2d::processor 0: 1 locally owned active cells
-DEAL:2d::processor 1: 84 locally owned active cells
-DEAL:2d::processor 2: 171 locally owned active cells
-DEAL:2d::processor 0: 256.000 weight
-DEAL:2d::processor 1: 84.0000 weight
-DEAL:2d::processor 2: 171.000 weight
+DEAL:2d::processor 1: 85 locally owned active cells
+DEAL:2d::processor 2: 170 locally owned active cells
+DEAL:2d::processor 0: 257000. weight
+DEAL:2d::processor 1: 85000.0 weight
+DEAL:2d::processor 2: 170000. weight
DEAL:3d::processor 0: 1 locally owned active cells
-DEAL:3d::processor 1: 1364 locally owned active cells
-DEAL:3d::processor 2: 2731 locally owned active cells
-DEAL:3d::processor 0: 4096.00 weight
-DEAL:3d::processor 1: 1364.00 weight
-DEAL:3d::processor 2: 2731.00 weight
+DEAL:3d::processor 1: 1365 locally owned active cells
+DEAL:3d::processor 2: 2730 locally owned active cells
+DEAL:3d::processor 0: 4.09700e+06 weight
+DEAL:3d::processor 1: 1.36500e+06 weight
+DEAL:3d::processor 2: 2.73000e+06 weight
DEAL:2d::processor 0: 1 locally owned active cells
DEAL:2d::processor 1: 0 locally owned active cells
-DEAL:2d::processor 2: 50 locally owned active cells
+DEAL:2d::processor 2: 51 locally owned active cells
DEAL:2d::processor 3: 102 locally owned active cells
-DEAL:2d::processor 4: 103 locally owned active cells
-DEAL:2d::processor 0: 256.000 weight
+DEAL:2d::processor 4: 102 locally owned active cells
+DEAL:2d::processor 0: 257000. weight
DEAL:2d::processor 1: 0 weight
-DEAL:2d::processor 2: 50.0000 weight
-DEAL:2d::processor 3: 102.000 weight
-DEAL:2d::processor 4: 103.000 weight
+DEAL:2d::processor 2: 51000.0 weight
+DEAL:2d::processor 3: 102000. weight
+DEAL:2d::processor 4: 102000. weight
DEAL:3d::processor 0: 1 locally owned active cells
DEAL:3d::processor 1: 0 locally owned active cells
-DEAL:3d::processor 2: 818 locally owned active cells
+DEAL:3d::processor 2: 819 locally owned active cells
DEAL:3d::processor 3: 1638 locally owned active cells
-DEAL:3d::processor 4: 1639 locally owned active cells
-DEAL:3d::processor 0: 4096.00 weight
+DEAL:3d::processor 4: 1638 locally owned active cells
+DEAL:3d::processor 0: 4.09700e+06 weight
DEAL:3d::processor 1: 0 weight
-DEAL:3d::processor 2: 818.000 weight
-DEAL:3d::processor 3: 1638.00 weight
-DEAL:3d::processor 4: 1639.00 weight
+DEAL:3d::processor 2: 819000. weight
+DEAL:3d::processor 3: 1.63800e+06 weight
+DEAL:3d::processor 4: 1.63800e+06 weight
#include <fstream>
+unsigned int n_global_active_cells;
+
+template <int dim>
+unsigned int
+cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+{
+ return (
+ // bottom left corner
+ (cell->center()[0] < 1)
+ &&
+ (cell->center()[1] < 1)
+ &&
+ (dim == 3 ?
+ (cell->center()[2] < 1) :
+ true)
+ ?
+ // one cell has more weight than all others together
+ n_global_active_cells * 1000
+ :
+ 0);
+}
template<int dim>
void test()
tr.refine_global (1);
// repartition the mesh; attach different weights to all cells
- std::vector<unsigned int> weights (tr.n_active_cells());
- for (typename Triangulation<dim>::active_cell_iterator
- cell = tr.begin_active(); cell != tr.end(); ++cell)
- weights[cell->active_cell_index()]
- = (
- // bottom left corner
- (cell->center()[0] < 1)
- &&
- (cell->center()[1] < 1)
- &&
- (dim == 3 ?
- (cell->center()[2] < 1) :
- true)
- ?
- // one cell has more weight than all others together
- tr.n_global_active_cells()
- :
- 1);
- tr.repartition (weights);
+ n_global_active_cells = tr.n_global_active_cells();
+ tr.signals.cell_weight.connect(std_cxx11::bind(&cell_weight<dim>,
+ std_cxx11::_1,
+ std_cxx11::_2));
+ tr.repartition ();
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
cell = tr.begin_active(); cell != tr.end(); ++cell)
if (cell->is_locally_owned())
integrated_weights[myid]
- += (
- // bottom left corner
- (cell->center()[0] < 1)
- &&
- (cell->center()[1] < 1)
- &&
- (dim == 3 ?
- (cell->center()[2] < 1) :
- true)
- ?
- tr.n_global_active_cells()
- :
- 1);
+ += 1000 + cell_weight<dim>(cell,parallel::distributed::Triangulation<dim>::CELL_PERSIST);
Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int p=0; p<numproc; ++p)
DEAL:2d::processor 0: 0 locally owned active cells
-DEAL:2d::processor 1: 84 locally owned active cells
-DEAL:2d::processor 2: 172 locally owned active cells
+DEAL:2d::processor 1: 88 locally owned active cells
+DEAL:2d::processor 2: 168 locally owned active cells
DEAL:2d::processor 0: 0 weight
-DEAL:2d::processor 1: 339.000 weight
-DEAL:2d::processor 2: 172.000 weight
+DEAL:2d::processor 1: 344000. weight
+DEAL:2d::processor 2: 168000. weight
DEAL:3d::processor 0: 0 locally owned active cells
DEAL:3d::processor 1: 1368 locally owned active cells
DEAL:3d::processor 2: 2728 locally owned active cells
DEAL:3d::processor 0: 0 weight
-DEAL:3d::processor 1: 5463.00 weight
-DEAL:3d::processor 2: 2728.00 weight
+DEAL:3d::processor 1: 5.46400e+06 weight
+DEAL:3d::processor 2: 2.72800e+06 weight