* the cell id, the subdomain_id and the level_subdomain_id as well as
* information related to manifold_id and boundary_id.
*
+ * @note Similarly to dealii::CellData, this structure stores information
+ * about a cell. However, in contrast to dealii::CellData, it also stores
+ * a unique id, partitioning information, and information related to cell
+ * faces and edges.
+ *
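+ * A minimal sketch of how an object of this type might be filled by hand;
+ * only the manifold_id member is shown in this header, the remaining
+ * members are paraphrased from the description above:
+ * @code
+ * CellData<dim> cell_data;
+ * cell_data.manifold_id = numbers::flat_manifold_id;
+ * // ... furthermore set the unique cell id, the subdomain_id and
+ * // level_subdomain_id, and the boundary/manifold ids of the cell's
+ * // faces and lines
+ * @endcode
+ *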
* @author Peter Munch, 2019
*/
template <int dim>
struct CellData
{
- /**
- * Constructor.
- */
- CellData() = default;
-
/**
* Boost serialization function
*/
/**
* Manifold id of the cell.
*/
- types::material_id manifold_id;
+ types::manifold_id manifold_id;
/**
* Manifold id of all lines of the cell.
*
* @param tria Partitioned input triangulation.
* @param comm MPI_Communicator to be used. In the case
- * of dealii::distributed::Triangulation, the communicators
- * have to match.
+ * of dealii::distributed::Triangulation, the communicators have to match.
* @param construct_multilevel_hierarchy Signal if the multigrid levels
* should be constructed.
- * @param my_rank_in Construct Description for this rank (only
- * working for serial triangulations).
- * @return Description to be used to setup a Triangulation.
+ * @param my_rank_in Construct Description for the specified rank (only
+ * working for serial triangulations that have been partitioned by
+ * functions like GridTools::partition_triangulation()).
+ * @return Description to be used to set up a Triangulation.
*
* @note Multilevel hierarchies are supported if it is enabled in
- * parallel::fullydistributed::Triangulation.
+ * parallel::fullydistributed::Triangulation.
*
* @note Hanging nodes in the input triangulation are supported. However,
- * to be able to use this
- * feature in the case of parallel::fullydistributed::Triangulation,
- * the user has to enable multilevel hierarchy support in
- * parallel::fullydistributed::Triangulation.
+ * to be able to use this feature in the case of
+ * parallel::fullydistributed::Triangulation, the user has to enable
+ * multilevel hierarchy support in
+ * parallel::fullydistributed::Triangulation.
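+ *
+ * A minimal usage sketch, assuming this utility is reachable as
+ * create_description_from_triangulation() and that the result is fed to
+ * parallel::fullydistributed::Triangulation::create_triangulation()
+ * (variable names are illustrative):
+ * @code
+ * // serial triangulation, already partitioned, e.g., by
+ * // GridTools::partition_triangulation()
+ * const auto description =
+ *   create_description_from_triangulation(serial_tria, comm, true);
+ *
+ * parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
+ * tria_pft.create_triangulation(description);
+ * @endcode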
*
* @author Peter Munch, 2019
*/
/**
- * Construct a construction::Description. In contrast
+ * Construct a TriangulationDescription::Description. In contrast
* to the function above, this function is also responsible for creating
* a serial triangulation and for its partitioning (by calling the
- * provided std::functions). Internally only selected processes (every
- * n-th/each root of a group of size group_size) create a serial
+ * provided `std::function` objects). Internally, only selected processes
+ * (every n-th/each root of a group of size group_size) create a serial
* triangulation and the ConstructionData for all processes in its group,
* which is communicated.
*
* @note A reasonable group size is the size of a NUMA domain or the
* size of a compute node.
*
- * @param serial_grid_generator A function, which creates a serial triangulation.
- * @param serial_grid_partitioner A function, which can partition a serial
- * triangulation, i.e., sets the sudomain_ids of the active cells.
- * The function takes as the first argument a serial triangulation,
- * as the second argument the MPI communicator, and as the third
- * argument the group size.
- * @param comm MPI communicator
+ * @param serial_grid_generator A function which creates a serial triangulation.
+ * @param serial_grid_partitioner A function which can partition a serial
+ * triangulation, i.e., sets the subdomain_ids of the active cells.
+ * The function takes as the first argument a serial triangulation,
+ * as the second argument the MPI communicator, and as the third
+ * argument the group size.
+ * @param comm MPI communicator.
* @param group_size The size of each group.
* @param construct_multilevel_hierarchy Construct multigrid levels.
- * @return Description to be used to setup a Triangulation.
+ * @return Description to be used to set up a Triangulation.
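+ *
+ * A possible call, sketched with lambdas whose signatures follow the
+ * parameter description above; the function name
+ * create_description_from_triangulation_in_groups, the group size of 8,
+ * and the surrounding variables are assumptions for illustration:
+ * @code
+ * const auto description =
+ *   create_description_from_triangulation_in_groups<dim, dim>(
+ *     [](Triangulation<dim> &tria) {
+ *       // serial_grid_generator: build the full serial mesh
+ *       GridGenerator::hyper_cube(tria);
+ *       tria.refine_global(4);
+ *     },
+ *     [](Triangulation<dim> &tria, const MPI_Comm comm, const unsigned int) {
+ *       // serial_grid_partitioner: set the subdomain_ids of the active
+ *       // cells (the third argument is the group size, unused here)
+ *       GridTools::partition_triangulation(
+ *         Utilities::MPI::n_mpi_processes(comm), tria);
+ *     },
+ *     comm,
+ *     8,    // group_size
+ *     true  // construct_multilevel_hierarchy
+ *   );
+ * @endcode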
*
* @author Peter Munch, 2019
*/
*/
template <int dim, int spacedim>
void
- set_user_flag_reverse(TriaIterator<CellAccessor<dim, spacedim>> cell)
+ set_user_flag_and_of_its_parents(
+ const TriaIterator<CellAccessor<dim, spacedim>> &cell)
{
cell->set_user_flag();
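+     // also mark all coarser parent cells, so that the chain of cells
+     // leading from the coarse grid to this cell is kept as well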
if (cell->level() != 0)
- set_user_flag_reverse(cell->parent());
+ set_user_flag_and_of_its_parents(cell->parent());
}
{
if (auto tria_pdt = dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(&tria))
- AssertThrow(comm == tria_pdt->get_communicator(),
- ExcMessage("MPI communicators do not match."));
+ Assert(comm == tria_pdt->get_communicator(),
+ ExcMessage("MPI communicators do not match."));
// First, figure out for what rank we are supposed to build the
// ConstructionData object
unsigned int my_rank = my_rank_in;
- AssertThrow(my_rank == numbers::invalid_unsigned_int ||
- my_rank < dealii::Utilities::MPI::n_mpi_processes(comm),
- ExcMessage(
- "Rank has to be smaller than available processes."));
+ Assert(my_rank == numbers::invalid_unsigned_int ||
+ my_rank < dealii::Utilities::MPI::n_mpi_processes(comm),
+ ExcMessage("Rank has to be smaller than available processes."));
if (auto tria_pdt = dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(&tria))
{
- AssertThrow(
+ Assert(
my_rank == numbers::invalid_unsigned_int ||
my_rank == dealii::Utilities::MPI::this_mpi_process(comm),
ExcMessage(
}
else
{
- AssertThrow(
- false, ExcMessage("This type of triangulation is not supported!"));
+ Assert(false,
+ ExcMessage("This type of triangulation is not supported!"));
}
Description<dim, spacedim> construction_data;
// check if multilevel hierarchy should be constructed
if (construct_multilevel_hierarchy == false)
{
- AssertThrow(
+ Assert(
tria.has_hanging_nodes() == false,
ExcMessage(
"Hanging nodes are only supported if multilevel hierarchy is constructed!"));
// 1b) loop over levels (from fine to coarse) and mark on each level
// the locally relevant cells
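+             // (the loop variable is a signed int so that level 0 is still
+             // processed and the loop then terminates cleanly)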
- for (unsigned int level =
- tria.get_triangulation().n_global_levels() - 1;
- level != numbers::invalid_unsigned_int;
- level--)
+ for (int level = tria.get_triangulation().n_global_levels() - 1;
+ level >= 0;
+ --level)
{
// collect vertices connected to a (on any level) locally owned
// cell
// mark all locally relevant cells
for (auto cell : tria.cell_iterators_on_level(level))
if (is_locally_relevant_on_level(cell))
- set_user_flag_reverse(cell);
+ set_user_flag_and_of_its_parents(cell);
}
// 2) set_up coarse-grid triangulation
std::min(group_root + group_size,
dealii::Utilities::MPI::n_mpi_processes(comm));
- // 3) create ConstructionData for the other processes in group
+ // 3) create Description for the other processes in the group; since
+ // we expect that this function is called for huge meshes, one
+ // Description is created at a time and sent away; only once it
+ // has been sent is the next rank processed.
for (unsigned int other_rank = group_root + 1; other_rank < end_group;
- other_rank++)
+ ++other_rank)
{
// 3a) create construction data for other ranks
const auto construction_data =
len,
MPI_CHAR,
status.MPI_SOURCE,
- status.MPI_TAG,
+ mpi_tag,
comm,
&status);
AssertThrowMPI(ierr);