/**
* Given a hp::DoFHandler object, make sure that the active_fe_indices
* that a user has set for locally owned cells are communicated to all
- * ghost cells as well.
+ * other relevant cells as well.
+ *
+ * For parallel::shared::Triangulation objects,
+ * this information is distributed to both ghost and artificial cells.
+ *
+ * In case a parallel::distributed::Triangulation is used,
+ * active_fe_indices are communicated only to ghost cells.
*/
template <int dim, int spacedim>
static void
tr->get_communicator(),
active_fe_indices);
- // now go back and fill the active_fe_index on ghost
+ // now go back and fill the active_fe_index on all other
// cells. we would like to call cell->set_active_fe_index(),
// but that function does not allow setting these indices on
// non-locally_owned cells. so we have to work around the
// issue a little bit by accessing the underlying data
// structures directly
for (const auto &cell : dof_handler.active_cell_iterators())
- if (cell->is_ghost())
+ if (!cell->is_locally_owned())
dof_handler.levels[cell->level()]->set_active_fe_index(
cell->index(),
active_fe_indices[cell->active_cell_index()]);
++child_index)
{
const auto &child = parent->child(child_index);
-
- if (child->is_locally_owned())
- {
- Assert(child->active(), ExcInternalError());
- child->set_active_fe_index(pair.second);
- }
+ Assert(child->is_locally_owned() && child->active(),
+ ExcInternalError());
+ child->set_active_fe_index(pair.second);
}
}
for (const auto &pair : fe_transfer->coarsened_cells_fe_index)
{
const auto &cell = pair.first;
-
- if (cell->is_locally_owned())
- {
- Assert(cell->active(), ExcInternalError());
- cell->set_active_fe_index(pair.second);
- }
+ Assert(cell->is_locally_owned() && cell->active(),
+ ExcInternalError());
+ cell->set_active_fe_index(pair.second);
}
}
};
// stored as protected data of this object, but for simplicity we
// use the cell-wise access.
for (const auto &cell : active_cell_iterators())
- if (cell->is_locally_owned())
+ if (!cell->is_artificial())
active_fe_indices[cell->active_cell_index()] = cell->active_fe_index();
}
void
DoFHandler<dim, spacedim>::set_fe(const hp::FECollection<dim, spacedim> &ff)
{
+ Assert(
+ tria != nullptr,
+ ExcMessage(
+ "You need to set the Triangulation in the DoFHandler using initialize() or "
+ "in the constructor before you can distribute DoFs."));
+ Assert(tria->n_levels() > 0,
+ ExcMessage("The Triangulation you are using is empty!"));
+ Assert(ff.size() > 0, ExcMessage("The hp::FECollection given is empty!"));
+
// don't create a new object if the one we have is already appropriate
if (fe_collection != ff)
fe_collection = hp::FECollection<dim, spacedim>(ff);
// ensure that the active_fe_indices vectors are initialized correctly
create_active_fe_table();
+ // initialize all p-refinement and p-coarsening flags
+ create_p_adaptation_flags();
+
// make sure every processor knows the active_fe_indices
// on both its own cells and all ghost cells
dealii::internal::hp::DoFHandlerImplementation::Implementation::
// make sure that the fe collection is large enough to
// cover all fe indices presently in use on the mesh
for (const auto &cell : active_cell_iterators())
- if (cell->is_locally_owned())
+ if (!cell->is_artificial())
Assert(cell->active_fe_index() < fe_collection.size(),
ExcInvalidFEIndex(cell->active_fe_index(),
fe_collection.size()));
-
- // initialize all p-refinement and p-coarsening flags
- create_p_adaptation_flags();
}
DoFHandler<dim, spacedim>::distribute_dofs(
const hp::FECollection<dim, spacedim> &ff)
{
- Assert(
- tria != nullptr,
- ExcMessage(
- "You need to set the Triangulation in the DoFHandler using initialize() or "
- "in the constructor before you can distribute DoFs."));
- Assert(tria->n_levels() > 0,
- ExcMessage("The Triangulation you are using is empty!"));
- Assert(ff.size() > 0, ExcMessage("The hp::FECollection given is empty!"));
+ // assign the fe_collection and initialize all active_fe_indices
+ set_fe(ff);
// If an underlying shared::Tria allows artificial cells,
// then save the current set of subdomain ids, and set
// subdomain ids to the "true" owner of each cell. we later
// restore these flags
- std::vector<types::subdomain_id> saved_subdomain_ids;
- if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
- (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
- &get_triangulation())))
- if (shared_tria->with_artificial_cells())
- {
- saved_subdomain_ids.resize(shared_tria->n_active_cells());
-
- const std::vector<types::subdomain_id> &true_subdomain_ids =
- shared_tria->get_true_subdomain_ids_of_cells();
+ std::vector<types::subdomain_id> saved_subdomain_ids;
+ const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
+ (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
+ &get_triangulation()));
+ if (shared_tria != nullptr && shared_tria->with_artificial_cells())
+ {
+ saved_subdomain_ids.resize(shared_tria->n_active_cells());
- for (const auto &cell : shared_tria->active_cell_iterators())
- {
- const unsigned int index = cell->active_cell_index();
- saved_subdomain_ids[index] = cell->subdomain_id();
- cell->set_subdomain_id(true_subdomain_ids[index]);
- }
- }
+ const std::vector<types::subdomain_id> &true_subdomain_ids =
+ shared_tria->get_true_subdomain_ids_of_cells();
- // assign the fe_collection and initialize all active_fe_indices
- set_fe(ff);
+ for (const auto &cell : shared_tria->active_cell_iterators())
+ {
+ const unsigned int index = cell->active_cell_index();
+ saved_subdomain_ids[index] = cell->subdomain_id();
+ cell->set_subdomain_id(true_subdomain_ids[index]);
+ }
+ }
// then allocate space for all the other tables
dealii::internal::hp::DoFHandlerImplementation::Implementation::
reserve_space(*this);
// now undo the subdomain modification
- if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
- (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
- &get_triangulation())))
- if (shared_tria->with_artificial_cells())
- for (const auto &cell : shared_tria->active_cell_iterators())
- cell->set_subdomain_id(
- saved_subdomain_ids[cell->active_cell_index()]);
+ if (shared_tria != nullptr && shared_tria->with_artificial_cells())
+ for (const auto &cell : shared_tria->active_cell_iterators())
+ cell->set_subdomain_id(saved_subdomain_ids[cell->active_cell_index()]);
// Clear user flags because we will need them. But first we save
internal::DoFHandlerImplementation::Policy::ParallelDistributed<
DoFHandler<dim, spacedim>>>(*this);
+ // repartitioning signals
tria_listeners.push_back(
this->tria->signals.pre_distributed_repartition.connect(std::bind(
&DoFHandler<dim,
spacedim>::post_distributed_active_fe_index_transfer,
std::ref(*this))));
+ // refinement signals
tria_listeners.push_back(
this->tria->signals.pre_distributed_refinement.connect(std::bind(
&DoFHandler<dim,
spacedim>::post_distributed_active_fe_index_transfer,
std::ref(*this))));
+ // serialization signals
tria_listeners.push_back(
this->tria->signals.post_distributed_save.connect(
std::bind(&DoFHandler<dim, spacedim>::
post_distributed_serialization_of_active_fe_indices,
std::ref(*this))));
}
- else if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
- *>(&this->get_triangulation()) != nullptr)
- {
- policy =
- std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
- ParallelShared<DoFHandler<dim, spacedim>>>(
- *this);
-
- tria_listeners.push_back(
- this->tria->signals.pre_refinement.connect(std::bind(
- &DoFHandler<dim, spacedim>::pre_shared_active_fe_index_transfer,
- std::ref(*this))));
- tria_listeners.push_back(
- this->tria->signals.post_refinement.connect(std::bind(
- &DoFHandler<dim, spacedim>::post_shared_active_fe_index_transfer,
- std::ref(*this))));
- }
else
{
- policy =
- std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
- Sequential<DoFHandler<dim, spacedim>>>(
- *this);
+ if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
+ *>(&this->get_triangulation()) != nullptr)
+ policy =
+ std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
+ ParallelShared<DoFHandler<dim, spacedim>>>(
+ *this);
+ else
+ policy =
+ std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
+ Sequential<DoFHandler<dim, spacedim>>>(
+ *this);
+ // refinement signals
tria_listeners.push_back(this->tria->signals.pre_refinement.connect(
std::bind(&DoFHandler<dim, spacedim>::pre_active_fe_index_transfer,
std::ref(*this))));
- template <int dim, int spacedim>
- void
- DoFHandler<dim, spacedim>::pre_shared_active_fe_index_transfer()
- {
-#ifndef DEAL_II_WITH_MPI
- Assert(false, ExcInternalError());
-#else
- // Finite elements need to be assigned to each cell by calling
- // distribute_dofs() first to make this functionality available.
- if (fe_collection.size() > 0)
- {
- Assert(active_fe_index_transfer == nullptr, ExcInternalError());
-
- active_fe_index_transfer =
- std_cxx14::make_unique<ActiveFEIndexTransfer>();
-
- // If the underlying shared::Tria allows artificial cells,
- // then save the current set of subdomain ids, and set
- // subdomain ids to the "true" owner of each cell. We later
- // restore these flags.
- const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
- (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
- &(*tria)));
- Assert(shared_tria != nullptr, ExcInternalError());
-
- std::vector<types::subdomain_id> saved_subdomain_ids;
- if (shared_tria->with_artificial_cells())
- {
- saved_subdomain_ids.resize(shared_tria->n_active_cells());
-
- const std::vector<types::subdomain_id> &true_subdomain_ids =
- shared_tria->get_true_subdomain_ids_of_cells();
-
- for (const auto &cell : active_cell_iterators())
- {
- const unsigned int index = cell->active_cell_index();
- saved_subdomain_ids[index] = cell->subdomain_id();
- cell->set_subdomain_id(true_subdomain_ids[index]);
- }
-
- // Make sure every processor knows the active_fe_indices
- // on both its own cells and all ghost cells.
- dealii::internal::hp::DoFHandlerImplementation::Implementation::
- communicate_active_fe_indices(*this);
- }
-
- // Now do what we would do in the sequential case.
- dealii::internal::hp::DoFHandlerImplementation::Implementation::
- collect_fe_indices_on_cells_to_be_refined(*this);
-
- // Finally, restore current subdomain_ids.
- if (shared_tria->with_artificial_cells())
- for (const auto &cell : active_cell_iterators())
- {
- if (cell->is_artificial())
- cell->set_subdomain_id(numbers::invalid_subdomain_id);
- else
- cell->set_subdomain_id(
- saved_subdomain_ids[cell->active_cell_index()]);
- }
- }
-#endif
- }
-
-
-
template <int dim, int spacedim>
void
DoFHandler<dim, spacedim>::pre_distributed_active_fe_index_transfer()
for (unsigned int child_index = 0;
child_index < pair.first->n_children();
++child_index)
- active_fe_index_transfer
- ->active_fe_indices[pair.first->child(child_index)
- ->active_cell_index()] = pair.second;
+ {
+ // Make sure that all children belong to the same subdomain.
+ Assert(pair.first->child(child_index)->is_locally_owned(),
+ ExcInternalError());
+
+ active_fe_index_transfer
+ ->active_fe_indices[pair.first->child(child_index)
+ ->active_cell_index()] = pair.second;
+ }
// Create transfer object and attach to it.
const auto *distributed_tria = dynamic_cast<
{
Assert(active_fe_index_transfer != nullptr, ExcInternalError());
- // For Triangulation and p::s::Triangulation, the old cell iterators
- // are still valid. There is no need to transfer data in this case,
- // and we can re-use our previously gathered information from the
- // container.
-
- dealii::internal::hp::DoFHandlerImplementation::Implementation::
- distribute_fe_indices_on_refined_cells(*this);
-
- // Free memory.
- active_fe_index_transfer.reset();
- }
- }
-
-
-
- template <int dim, int spacedim>
- void
- DoFHandler<dim, spacedim>::post_shared_active_fe_index_transfer()
- {
-#ifndef DEAL_II_WITH_MPI
- Assert(false, ExcInternalError());
-#else
- // Finite elements need to be assigned to each cell by calling
- // distribute_dofs() first to make this functionality available.
- if (fe_collection.size() > 0)
- {
- Assert(active_fe_index_transfer != nullptr, ExcInternalError());
-
- // Do what we normally do in the sequential case.
dealii::internal::hp::DoFHandlerImplementation::Implementation::
distribute_fe_indices_on_refined_cells(*this);
// We have to distribute the information about active_fe_indices
- // on all processors, if a parallel::shared::Triangulation
- // has been used.
+ // of all cells (including the artificial ones) on all processors,
+ // if a parallel::shared::Triangulation has been used.
dealii::internal::hp::DoFHandlerImplementation::Implementation::
communicate_active_fe_indices(*this);
// Free memory.
active_fe_index_transfer.reset();
}
-#endif
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Cell Weights Test
+// -----------------
+// Create a 4x4(x4) grid, on which all cells are associated with a Q1
+// element, except for the very first one, which is assigned a Q5 element.
+// We choose a cell weighting algorithm based on the number of degrees
+// of freedom and check whether the load is balanced as expected after
+// repartitioning the triangulation. The accumulated weight on each
+// processor should correspond to the sum of all degrees of freedom on
+// all cells of the corresponding subdomain.
+// We employ a large proportionality factor in our weighting function
+// so that the standard weight of 1000 per cell becomes negligible.
+//
+// This test works on a parallel::shared::Triangulation with METIS
+// as a partitioner. Cell weighting with ZOLTAN was not available
+// at the time this test was written.
+//
+// We consider artificial cells in this case.
+
+
+#include <deal.II/distributed/cell_weights.h>
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/hp/dof_handler.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
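+  // Set up a shared triangulation that keeps artificial cells (third
+  // constructor argument) and is partitioned by METIS.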
+ parallel::shared::Triangulation<dim> tria(
+ MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ true,
+ parallel::shared::Triangulation<dim>::Settings::partition_metis);
+
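+  // Create the 4x4 (or 4x4x4) grid via two global refinements of a cube.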
+ GridGenerator::hyper_cube(tria);
+ tria.refine_global(2);
+
+  // Prepare an hp::FECollection with Q1 and Q5 elements.
+ hp::FECollection<dim> fe_collection;
+ fe_collection.push_back(FE_Q<dim>(1));
+ fe_collection.push_back(FE_Q<dim>(5));
+
+ hp::DoFHandler<dim> dh(tria);
+ dh.set_fe(fe_collection);
+ // default: active_fe_index = 0
+ for (auto &cell : dh.active_cell_iterators())
+ if (cell->is_locally_owned())
+ if (cell->id().to_string() == "0_2:00")
+ cell->set_active_fe_index(1);
+
+ deallog << "Number of cells before repartitioning: "
+ << tria.n_locally_owned_active_cells() << std::endl;
+ {
+ unsigned int dof_counter = 0;
+ for (auto &cell : dh.active_cell_iterators())
+ if (cell->is_locally_owned())
+ dof_counter += cell->get_fe().dofs_per_cell;
+ deallog << " Cumulative dofs per cell: " << dof_counter << std::endl;
+ }
+
+
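+  // Attach a dof-based weighting function to the triangulation: each cell
+  // is weighted proportionally to its number of degrees of freedom. The
+  // large factor makes the default weight of 1000 per cell negligible.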
+ parallel::CellWeights<dim> cell_weights(dh);
+ cell_weights.register_ndofs_weighting(100000);
+
+  // We did not flag any cells for refinement or coarsening, but
+  // execute_coarsening_and_refinement() will still repartition the domain.
+ tria.execute_coarsening_and_refinement();
+
+
+ deallog << "Number of cells after repartitioning: "
+ << tria.n_locally_owned_active_cells() << std::endl;
+ {
+ unsigned int dof_counter = 0;
+ for (auto &cell : dh.active_cell_iterators())
+ if (cell->is_locally_owned())
+ dof_counter += cell->get_fe().dofs_per_cell;
+ deallog << " Cumulative dofs per cell: " << dof_counter << std::endl;
+ }
+
+ // make sure no processor is hanging
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
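+  // Limit each MPI process to a single thread.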
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}