if (number_cache.n_locally_owned_dofs_per_processor.empty() &&
number_cache.n_global_dofs > 0)
{
- MPI_Comm comm;
-
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- comm = tr->get_communicator();
- else
- comm = MPI_COMM_SELF;
-
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
number_cache)
.n_locally_owned_dofs_per_processor =
- number_cache.get_n_locally_owned_dofs_per_processor(comm);
+ number_cache.get_n_locally_owned_dofs_per_processor(get_communicator());
}
return number_cache.n_locally_owned_dofs_per_processor;
}
if (number_cache.locally_owned_dofs_per_processor.empty() &&
number_cache.n_global_dofs > 0)
{
- MPI_Comm comm;
-
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- comm = tr->get_communicator();
- else
- comm = MPI_COMM_SELF;
-
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
number_cache)
.locally_owned_dofs_per_processor =
- number_cache.get_locally_owned_dofs_per_processor(comm);
+ number_cache.get_locally_owned_dofs_per_processor(get_communicator());
}
return number_cache.locally_owned_dofs_per_processor;
}
if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() &&
mg_number_cache[level].n_global_dofs > 0)
{
- MPI_Comm comm;
-
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- comm = tr->get_communicator();
- else
- comm = MPI_COMM_SELF;
-
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
mg_number_cache[level])
.locally_owned_dofs_per_processor =
- mg_number_cache[level].get_locally_owned_dofs_per_processor(comm);
+ mg_number_cache[level].get_locally_owned_dofs_per_processor(
+ get_communicator());
}
return mg_number_cache[level].locally_owned_dofs_per_processor;
}
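For context, here is a minimal usage sketch (not part of the patch) of the lazily filled caches above: the first call to DoFHandler::n_locally_owned_dofs_per_processor() takes the branch shown and now obtains its communicator from DoFHandler::get_communicator() rather than from a local dynamic_cast. The mesh setup (2d hyper_cube, FE_Q(1), three global refinements) is illustrative only and assumes deal.II was built with MPI and p4est.

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>

int main(int argc, char *argv[])
{
  using namespace dealii;
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  // First access fills number_cache.n_locally_owned_dofs_per_processor via
  // the code path above, using the DoFHandler's communicator.
  const std::vector<types::global_dof_index> &counts =
    dof_handler.n_locally_owned_dofs_per_processor();
  (void)counts;

  return 0;
}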
const SparsityPatternType & sp,
const DoFHandler<dim, spacedim> &dh)
{
- const dealii::parallel::TriangulationBase<dim, spacedim> *dist_tria =
- dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, spacedim> *>(
- &(dh.get_triangulation()));
- MPI_Comm communicator =
- dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ const MPI_Comm communicator = dh.get_communicator();
+
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
const SparsityPatternType & sp,
const DoFHandler<dim, spacedim> &dh)
{
- const dealii::parallel::TriangulationBase<dim, spacedim> *dist_tria =
- dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, spacedim> *>(
- &(dh.get_triangulation()));
- MPI_Comm communicator =
- dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ const MPI_Comm communicator = dh.get_communicator();
+
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
sp,
const SparsityPatternType & sp,
const DoFHandler<dim, spacedim> &dh)
{
- const dealii::parallel::TriangulationBase<dim, spacedim> *dist_tria =
- dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, spacedim> *>(
- &(dh.get_triangulation()));
- MPI_Comm communicator =
- dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ const MPI_Comm communicator = dh.get_communicator();
+
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
sp,
const SparsityPatternType & sp,
const DoFHandler<dim, spacedim> &dh)
{
- const dealii::parallel::TriangulationBase<dim, spacedim> *dist_tria =
- dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, spacedim> *>(
- &(dh.get_triangulation()));
- MPI_Comm communicator =
- dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ const MPI_Comm communicator = dh.get_communicator();
+
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
sp,
const SparsityPatternType & sp,
const DoFHandler<dim, spacedim> &dh)
{
- const dealii::parallel::TriangulationBase<dim, spacedim> *dist_tria =
- dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, spacedim> *>(
- &(dh.get_triangulation()));
- MPI_Comm communicator =
- dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ const MPI_Comm communicator = dh.get_communicator();
+
// Reinit PETSc matrix
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
dst[level].reinit(ghosted_level_vector[level], false);
else
{
- const dealii::parallel::TriangulationBase<dim, spacedim> *tria =
- (dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, spacedim> *>(
- &dof_handler.get_triangulation()));
dst[level].reinit(dof_handler.locally_owned_mg_dofs(level),
- tria != nullptr ? tria->get_communicator() :
- MPI_COMM_SELF);
+ dof_handler.get_communicator());
}
}
else if ((perform_plain_copy == false &&
LinearAlgebra::distributed::Vector<Number> &v =
dst[level].block(b);
v.reinit(dof_handler[b]->locally_owned_mg_dofs(level),
- tria != nullptr ? tria->get_communicator() :
- MPI_COMM_SELF);
+ dof_handler[b]->get_communicator());
}
dst[level].collect_sizes();
}
}
#endif
- MPI_Comm comm = MPI_COMM_SELF;
-#ifdef DEAL_II_WITH_MPI
- if (const parallel::TriangulationBase<dim, spacedim> *ptria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &tria))
- comm = ptria->get_communicator();
-#endif
+ const MPI_Comm comm = tria.get_communicator();
switch (norm)
{
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
{
- const parallel::TriangulationBase<dim, spacedim> *ptria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &mg_dof.get_triangulation());
- const MPI_Comm mpi_communicator =
- ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;
+ const MPI_Comm mpi_communicator = mg_dof.get_communicator();
fill_internal(mg_dof,
mg_constrained_dofs,
// the base class for keeping ghosted transfer indices. To avoid
// keeping two very similar vectors, we keep one single ghosted
// vector that is augmented/filled here.
- const dealii::parallel::TriangulationBase<dim, dim> *ptria =
- (dynamic_cast<
- const dealii::parallel::TriangulationBase<dim, dim> *>(&tria));
- const MPI_Comm communicator =
- ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;
-
reinit_level_partitioner(dof_handler.locally_owned_mg_dofs(level),
ghosted_level_dofs,
external_partitioners.empty() ?
nullptr :
external_partitioners[level],
- communicator,
+ tria.get_communicator(),
target_partitioners[level],
copy_indices_global_mine[level]);
external_partitioners.empty() ?
nullptr :
external_partitioners[0],
- communicator,
+ tria.get_communicator(),
target_partitioners[0],
copy_indices_global_mine[0]);
// complete sparsity patterns on their own, the sparsity pattern must
// be manually distributed.
- // Retrieve communicator from triangulation if it is parallel
- const parallel::TriangulationBase<dim, spacedim> *dist_tria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &(dof_handler.get_triangulation()));
-
- MPI_Comm communicator = dist_tria != nullptr ?
- dist_tria->get_communicator() :
- MPI_COMM_SELF;
-
// Distribute sparsity pattern
::dealii::SparsityTools::distribute_sparsity_pattern(
dsp,
dof_handler.locally_owned_mg_dofs(level + 1),
- communicator,
+ dof_handler.get_communicator(),
dsp.row_index_set());
}
#endif
if (!ids.empty())
AssertDimension(ids.size(), positions.size());
- const auto tria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &(*triangulation));
- const auto comm =
- (tria != nullptr ? tria->get_communicator() : MPI_COMM_WORLD);
+ const auto comm = triangulation->get_communicator();
const auto n_mpi_processes = Utilities::MPI::n_mpi_processes(comm);
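Taken together, these hunks replace the hand-rolled communicator lookup with the new get_communicator() calls. For reference, the removed pattern is equivalent to a small helper along the following lines; this is an illustrative sketch of the old logic (the name communicator_of is hypothetical), which Triangulation::get_communicator() and DoFHandler::get_communicator() are assumed to encapsulate. Note that the serial fallback was MPI_COMM_SELF in every hunk except the particle handler, which previously fell back to MPI_COMM_WORLD.

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria_base.h>
#include <deal.II/grid/tria.h>

template <int dim, int spacedim>
MPI_Comm
communicator_of(const dealii::Triangulation<dim, spacedim> &tria)
{
  // A parallel triangulation (distributed, fully distributed, or shared)
  // knows the communicator it was built on ...
  if (const auto *parallel_tria = dynamic_cast<
        const dealii::parallel::TriangulationBase<dim, spacedim> *>(&tria))
    return parallel_tria->get_communicator();

  // ... whereas a serial triangulation behaves like a single-rank job.
  return MPI_COMM_SELF;
}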