+ template <int dim, int spacedim>
+ void
+ TriangulationBase<dim, spacedim>::reset_global_cell_indices()
+ {
+#ifndef DEAL_II_WITH_MPI
+ Assert(false, ExcNeedsMPI());
+#else
+
+ // currently only implemented for distributed triangulations
+ if (dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
+ *>(this) == nullptr)
+ return;
+
+ // 1) determine the number of locally owned active cells
+ const types::global_cell_index n_locally_owned_cells =
+ this->n_locally_owned_active_cells();
+
+ // 2) determine the offset of each process
+ types::global_cell_index cell_index = 0;
+
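+ // MPI_Exscan computes an exclusive prefix sum over the per-rank cell
+ // counts: rank p receives the total number of cells owned by ranks
+ // 0,...,p-1, i.e., the first global index of its own cells. The offset
+ // of rank 0 is the initial value of zero set above.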
+ MPI_Exscan(&n_locally_owned_cells,
+ &cell_index,
+ 1,
+ Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
+ MPI_SUM,
+ this->mpi_communicator);
+
+ // 3) give global indices to locally owned cells and mark all other cells as
+ // invalid
+ for (const auto &cell : this->active_cell_iterators())
+ if (cell->is_locally_owned())
+ cell->set_global_active_cell_index(cell_index++);
+ else
+ cell->set_global_active_cell_index(numbers::invalid_dof_index);
+
+ // 4) determine the global indices of ghost cells
+ GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
+ *this,
+ [](const auto &cell) { return cell->global_active_cell_index(); },
+ [](const auto &cell, const auto &id) {
+ cell->set_global_active_cell_index(id);
+ });
+
+ // 5) set up new partitioner
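+ // The partitioner records which global cell indices are locally owned
+ // and which belong to ghost cells, so that vectors with one entry per
+ // active cell can exchange ghost values in the same way as DoF vectors.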
+ IndexSet is_local(this->n_global_active_cells());
+ IndexSet is_ghost(this->n_global_active_cells());
+
+ for (const auto &cell : this->active_cell_iterators())
+ if (!cell->is_artificial())
+ {
+ const auto index = cell->global_active_cell_index();
+
+ if (index == numbers::invalid_dof_index)
+ continue;
+
+ if (cell->is_locally_owned())
+ is_local.add_index(index);
+ else
+ is_ghost.add_index(index);
+ }
+
+ number_cache.active_cell_index_partitioner =
+ Utilities::MPI::Partitioner(is_local, is_ghost, this->mpi_communicator);
+
+ // 6) proceed with multigrid levels if requested
+ if (this->is_multilevel_hierarchy_constructed())
+ {
+ // 1) determine the number of locally owned cells on each level
+ std::vector<types::global_cell_index> n_locally_owned_cells(
+ this->n_global_levels(), 0);
+
+ for (const auto &cell : this->cell_iterators())
+ if (cell->level_subdomain_id() == this->locally_owned_subdomain())
+ n_locally_owned_cells[cell->level()]++;
+
+ // 2) determine the offset of each process
+ std::vector<types::global_cell_index> cell_index(
+ this->n_global_levels(), 0);
+
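+ // With a count equal to the number of levels, MPI_Exscan performs the
+ // exclusive prefix sum element-wise, yielding for each level the offset
+ // of this rank within that level's global numbering.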
+ MPI_Exscan(n_locally_owned_cells.data(),
+ cell_index.data(),
+ this->n_global_levels(),
+ Utilities::MPI::internal::mpi_type_id(
+ n_locally_owned_cells.data()),
+ MPI_SUM,
+ this->mpi_communicator);
+
+ // 3) determine global number of "active" cells on each level
+ std::vector<types::global_cell_index> n_cells_level(
+ this->n_global_levels(), 0);
+
+ for (unsigned int l = 0; l < this->n_global_levels(); ++l)
+ n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
+
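+ // After the exclusive scan, only the last rank knows the global totals
+ // (its offsets plus its own counts), so broadcast the numbers from that
+ // rank (root n_subdomains - 1) to everyone else.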
+ MPI_Bcast(n_cells_level.data(),
+ this->n_global_levels(),
+ Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
+ this->n_subdomains - 1,
+ this->mpi_communicator);
+
+ // 4) give global indices to locally owned cells on each level and mark
+ // all other cells as invalid
+ for (const auto &cell : this->cell_iterators())
+ if (cell->level_subdomain_id() == this->locally_owned_subdomain())
+ cell->set_global_level_cell_index(cell_index[cell->level()]++);
+ else
+ cell->set_global_level_cell_index(numbers::invalid_dof_index);
+
+ // 5) determine the global indices of ghost cells on each level
+ GridTools::exchange_cell_data_to_level_ghosts<
+ types::global_cell_index,
+ dealii::Triangulation<dim, spacedim>>(
+ *this,
+ [](const auto &cell) { return cell->global_level_cell_index(); },
+ [](const auto &cell, const auto &id) {
+ cell->set_global_level_cell_index(id);
+ });
+
+ number_cache.level_cell_index_partitioners.resize(
+ this->n_global_levels());
+
+ // 6) set up cell partitioners for each level
+ for (unsigned int l = 0; l < this->n_global_levels(); ++l)
+ {
+ IndexSet is_local(n_cells_level[l]);
+ IndexSet is_ghost(n_cells_level[l]);
+
+ for (const auto &cell : this->cell_iterators_on_level(l))
+ if (cell->level_subdomain_id() !=
+ dealii::numbers::artificial_subdomain_id)
+ {
+ const auto index = cell->global_level_cell_index();
+
+ if (index == numbers::invalid_dof_index)
+ continue;
+
+ if (cell->level_subdomain_id() ==
+ this->locally_owned_subdomain())
+ is_local.add_index(index);
+ else
+ is_ghost.add_index(index);
+ }
+
+ number_cache.level_cell_index_partitioners[l] =
+ Utilities::MPI::Partitioner(is_local,
+ is_ghost,
+ this->mpi_communicator);
+ }
+ }
+
+#endif
+ }
+
+
+
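+ // Usage sketch (not part of this patch; assumes deal.II's
+ // LinearAlgebra::distributed::Vector, with 'tria', 'cell', and 'value'
+ // as placeholders): the returned partitioner can back a ghosted vector
+ // with one entry per active cell, e.g.
+ //
+ //   const auto &part = tria.global_active_cell_index_partitioner();
+ //   LinearAlgebra::distributed::Vector<double> cell_data(
+ //     std::make_shared<const Utilities::MPI::Partitioner>(part));
+ //   cell_data(cell->global_active_cell_index()) = value;
+ //   cell_data.update_ghost_values();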
+ template <int dim, int spacedim>
+ const Utilities::MPI::Partitioner &
+ TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner() const
+ {
+ return number_cache.active_cell_index_partitioner;
+ }
+
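+
+ // Per-level analogue of the function above: returns the partitioner for
+ // the cells of a single multigrid level.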
+ template <int dim, int spacedim>
+ const Utilities::MPI::Partitioner &
+ TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
+ const unsigned int level) const
+ {
+ Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
+ AssertIndexRange(level, this->n_global_levels());
+
+ return number_cache.level_cell_index_partitioners[level];
+ }
+
template <int dim, int spacedim>
DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool check_for_distorted_cells)