--- /dev/null
+Changed: The parallel::Triangulation class has been renamed to parallel::TriangulationBase
+to better reflect its purpose as a base class for a family of parallel triangulations such as
+parallel::distributed::Triangulation or parallel::shared::Triangulation.
+<br>
+(Peter Munch, 2019/08/19)
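For user code, the practical effect is a rename of the type used when casting a Triangulation to its parallel base class, for example to retrieve the MPI communicator. A minimal sketch of that pattern, mirroring the casts updated throughout this patch (the helper name get_communicator_or_self and the use of a DoFHandler are illustrative, not part of the patch):
@code
#include <deal.II/distributed/tria_base.h>
#include <deal.II/dofs/dof_handler.h>

using namespace dealii;

// Return the triangulation's communicator if it is a parallel one,
// otherwise fall back to MPI_COMM_SELF (serial case).
template <int dim>
MPI_Comm
get_communicator_or_self(const DoFHandler<dim> &dof_handler)
{
  const auto *tria =
    dynamic_cast<const parallel::TriangulationBase<dim> *>(
      &dof_handler.get_triangulation());
  return (tria != nullptr) ? tria->get_communicator() : MPI_COMM_SELF;
}
@endcode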
namespace parallel
{
/**
- * Anytime a parallel::Triangulation is repartitioned, either upon request
+ * Anytime a parallel::TriangulationBase is repartitioned, either upon request
* or by refinement/coarsening, cells will be distributed amongst all
* subdomains to achieve an equally balanced workload. If the workload per
* cell varies, which is in general the case for hp::DoFHandler objects, we
* a hp::DoFHandler. One can choose from predefined weighting
* algorithms provided by this class or provide a custom one. The
* chosen weighting function will be connected to the corresponding
- * signal of the linked parallel::Triangulation via callback.
+ * signal of the linked parallel::TriangulationBase via callback.
*
* An object of this class needs to exist for every DoFHandler associated
* with the Triangulation we work on to achieve satisfying work balancing
* We store both to make sure to always work on the correct combination of
* both.
*/
- SmartPointer<const parallel::Triangulation<dim, spacedim>, CellWeights>
+ SmartPointer<const parallel::TriangulationBase<dim, spacedim>, CellWeights>
triangulation;
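The weighting callback described above is ultimately connected to the triangulation's cell_weight signal. A minimal sketch of attaching a custom weight directly to that signal, assuming a parallel::distributed::Triangulation and purely illustrative weight values (the helper name attach_example_weights is not part of deal.II):
@code
#include <deal.II/distributed/tria.h>

using namespace dealii;

template <int dim>
void
attach_example_weights(parallel::distributed::Triangulation<dim> &triangulation)
{
  triangulation.signals.cell_weight.connect(
    [](const typename Triangulation<dim>::cell_iterator &cell,
       const typename Triangulation<dim>::CellStatus /*status*/)
      -> unsigned int {
      // Purely illustrative: make cells in the right half of the
      // domain twice as expensive during repartitioning.
      return (cell->center()[0] > 0.) ? 2000u : 1000u;
    });
}
@endcode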
/**
*
*/
template <int dim, int spacedim = dim>
- class Triangulation : public dealii::parallel::Triangulation<dim, spacedim>
+ class Triangulation
+ : public dealii::parallel::TriangulationBase<dim, spacedim>
{
public:
using active_cell_iterator =
* MPI is not available.
*/
template <int dim, int spacedim = dim>
- class Triangulation : public dealii::parallel::Triangulation<dim, spacedim>
+ class Triangulation
+ : public dealii::parallel::TriangulationBase<dim, spacedim>
{
public:
/**
* and this function needs to know about boundaries. In other words, it is
* <i>not</i> enough to just set boundary indicators on newly created
* faces only <i>after</i> calling
- * <tt>distributed::parallel::Triangulation::execute_coarsening_and_refinement</tt>:
+ * <tt>parallel::distributed::Triangulation::execute_coarsening_and_refinement</tt>:
* it actually has to happen while that function is still running.
*
* The way to do this is by writing a function that sets boundary
* @ingroup distributed
*/
template <int dim, int spacedim = dim>
- class Triangulation : public dealii::parallel::Triangulation<dim, spacedim>
+ class Triangulation
+ : public dealii::parallel::TriangulationBase<dim, spacedim>
{
public:
/**
get_cell_weights() const;
/**
- * Override the implementation in parallel::Triangulation because
+ * Override the implementation in parallel::TriangulationBase because
* we can ask p4est about ghost neighbors across periodic boundaries.
*
* Specifically, this function determines the neighboring subdomains that
*/
template <int spacedim>
class Triangulation<1, spacedim>
- : public dealii::parallel::Triangulation<1, spacedim>
+ : public dealii::parallel::TriangulationBase<1, spacedim>
{
public:
/**
* p4est is not available.
*/
template <int dim, int spacedim = dim>
- class Triangulation : public dealii::parallel::Triangulation<dim, spacedim>
+ class Triangulation
+ : public dealii::parallel::TriangulationBase<dim, spacedim>
{
public:
/**
* parallel::shared::Triangulation.
*/
template <int dim, int spacedim = dim>
- class Triangulation : public dealii::Triangulation<dim, spacedim>
+ class TriangulationBase : public dealii::Triangulation<dim, spacedim>
{
public:
/**
* Constructor.
*/
- Triangulation(
+ TriangulationBase(
MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
/**
* Destructor.
*/
- virtual ~Triangulation() override;
+ virtual ~TriangulationBase() override;
/**
* Return MPI communicator used by this triangulation.
fill_level_ghost_owners();
};
+ /**
+ * Type alias provided for backward compatibility.
+ * @deprecated Use TriangulationBase instead of Triangulation.
+ */
+ template <int dim, int spacedim = dim>
+ using Triangulation DEAL_II_DEPRECATED = TriangulationBase<dim, spacedim>;
+
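With this alias in place, existing code that spells out parallel::Triangulation keeps compiling but emits a deprecation warning; the fix is a plain rename. A small hedged illustration (the function name is made up for this example):
@code
#include <deal.II/distributed/tria_base.h>

#include <iostream>

using namespace dealii;

template <int dim>
void
print_owned_cells(const parallel::TriangulationBase<dim> &tria) // new spelling
{
  // parallel::Triangulation<dim> would still name the same type here,
  // but using it now triggers the DEAL_II_DEPRECATED warning.
  std::cout << tria.n_locally_owned_active_cells() << std::endl;
}
@endcode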
} // namespace parallel
DEAL_II_NAMESPACE_CLOSE
std::vector<types::global_dof_index>
DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
{
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->get_triangulation()));
if (tr != nullptr)
return number_cache.get_n_locally_owned_dofs_per_processor(
std::vector<IndexSet>
DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
{
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->get_triangulation()));
if (tr != nullptr)
return number_cache.get_locally_owned_dofs_per_processor(
mg_number_cache.size() == this->get_triangulation().n_global_levels(),
ExcMessage(
"The level dofs are not set up properly! Did you call distribute_mg_dofs()?"));
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->get_triangulation()));
if (tr != nullptr)
return mg_number_cache[level].get_locally_owned_dofs_per_processor(
* @pre for serial triangulation @p cell_order must have size
* <code>dof_handler.get_triangulation().n_active_cells()</code>, whereas
* in case of parallel triangulation its size should be
- * parallel::Triangulation::n_locally_owned_active_cells(). Every active cell
+ * parallel::TriangulationBase::n_locally_owned_active_cells(). Every active
+ * cell
* iterator of that triangulation needs to be present in @p cell_order exactly
* once.
*/
* vector therefore presents a (very particular) <i>permutation</i> of the
* current DoF indices.
* @param[out] inverse_renumbering The reverse of the permutation returned
- * in the previous argument. In case of parallel::Triangulation the inverse
- * is within locally owned DoFs.
+ * in the previous argument. In case of parallel::TriangulationBase the
+ * inverse is within locally owned DoFs.
* @param[in] dof_handler The DoFHandler whose degrees of freedom are to be
* renumbered.
* @param[in] cell_order A vector that contains the order of the cells that
* @pre for serial triangulation @p cell_order must have size
* <code>dof_handler.get_triangulation().n_active_cells()</code>, whereas
* in case of parallel triangulation its size should be
- * parallel::Triangulation::n_locally_owned_active_cells(). Every active cell
- * iterator of that triangulation needs to be present in @p
+ * parallel::TriangulationBase::n_locally_owned_active_cells(). Every active
+ * cell iterator of that triangulation needs to be present in @p
* cell_order exactly once. @post For each @p i between zero and
* <code>dof_handler.n_locally_owned_dofs()</code>, the condition
* <code>renumbering[inverse_renumbering[i]] ==
/**
* Same as above but return the selected DoFs as IndexSet. In particular,
- * for parallel::Triangulation objects this function should be preferred.
+ * for parallel::TriangulationBase objects this function should be preferred.
*/
template <int dim, int spacedim>
IndexSet
* distances between them, might make the function extremely faster.
*
* @note If a point is not found inside the mesh, or is lying inside an
- * artificial cell of a parallel::Triangulation, an exception is thrown.
+ * artificial cell of a parallel::TriangulationBase, an exception is thrown.
*
* @note The actual return type of this function, i.e., the type referenced
* above as @p return_type, is
* @p local_points for each process, find the points lying on the locally
* owned part of the mesh and compute the quadrature rules for them.
* Distributed compute point locations is a function similar to
- * GridTools::compute_point_locations but working for parallel::Triangulation
- * objects and, unlike its serial version, also for a distributed
- * triangulation (see parallel::distributed::Triangulation).
+ * GridTools::compute_point_locations but working for
+ * parallel::TriangulationBase objects and, unlike its serial version, also
+ * for a distributed triangulation (see parallel::distributed::Triangulation).
*
* @param[in] cache a GridTools::Cache object
* @param[in] local_points the array of points owned by the current process.
*
* The function uses the triangulation's mpi communicator: for this reason it
* throws an assert error if the Triangulation is not derived from
- * parallel::Triangulation .
+ * parallel::TriangulationBase.
*
* In a serial execution the first three elements of the tuple are the same
* as in GridTools::compute_point_locations .
# else
constexpr int dim = MeshType::dimension;
constexpr int spacedim = MeshType::space_dimension;
- auto tria = static_cast<const parallel::Triangulation<dim, spacedim> *>(
+ auto tria = static_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mesh.get_triangulation());
Assert(
tria != nullptr,
* areas that cover all or parts of the local portion of a
* parallel triangulation, and an unsigned int representing
* the process or subdomain that owns these cells.
- * Given a point on a parallel::Triangulation, this tree
+ * Given a point on a parallel::TriangulationBase, this tree
* allows to identify one, or few candidate processes, for
* which the point lies on a locally owned cell.
*
namespace parallel
{
template <int, int>
- class Triangulation;
+ class TriangulationBase;
}
#endif
if (is_artificial())
return false;
- const parallel::Triangulation<dim, spacedim> *pt =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(this->tria);
+ const parallel::TriangulationBase<dim, spacedim> *pt =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ this->tria);
if (pt == nullptr)
return true;
return true;
#else
- const parallel::Triangulation<dim, spacedim> *pt =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(this->tria);
+ const parallel::TriangulationBase<dim, spacedim> *pt =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ this->tria);
if (pt == nullptr)
return true;
return false;
#else
- const parallel::Triangulation<dim, spacedim> *pt =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(this->tria);
+ const parallel::TriangulationBase<dim, spacedim> *pt =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ this->tria);
if (pt == nullptr)
return false;
return false;
#else
- const parallel::Triangulation<dim, spacedim> *pt =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(this->tria);
+ const parallel::TriangulationBase<dim, spacedim> *pt =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ this->tria);
if (pt == nullptr)
return false;
/**
* Assign a hp::FECollection @p fe to this object.
*
- * In case a parallel::Triangulation is assigned to this object,
+ * In case a parallel::TriangulationBase is assigned to this object,
* the active_fe_indices will be exchanged between processors so that
* each one knows the indices on its own cells and all ghost cells.
*
std::vector<types::global_dof_index>
DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
{
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->get_triangulation()));
if (tr != nullptr)
return number_cache.get_n_locally_owned_dofs_per_processor(
std::vector<IndexSet>
DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
{
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->get_triangulation()));
if (tr != nullptr)
return number_cache.get_locally_owned_dofs_per_processor(
Assert(level < this->get_triangulation().n_global_levels(),
ExcMessage("The given level index exceeds the number of levels "
"present in the triangulation"));
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->get_triangulation()));
if (tr != nullptr)
return mg_number_cache[level].get_locally_owned_dofs_per_processor(
{
const auto &triangulation = dof_handler.get_triangulation();
if (const auto parallel_triangulation =
- dynamic_cast<const parallel::Triangulation<dim> *>(&triangulation))
+ dynamic_cast<const parallel::TriangulationBase<dim> *>(
+ &triangulation))
internal_reinit(mapping,
dof_handler,
constraints,
// boundaries as evenly as possible between the processors
std::map<types::subdomain_id, FaceIdentifier>
inner_faces_at_proc_boundary;
- if (dynamic_cast<const parallel::Triangulation<dim> *>(&triangulation))
+ if (dynamic_cast<const parallel::TriangulationBase<dim> *>(
+ &triangulation))
{
const types::subdomain_id my_domain =
triangulation.locally_owned_subdomain();
// looking at the length of the lists of faces
# if defined(DEAL_II_WITH_MPI) && defined(DEBUG)
MPI_Comm comm = MPI_COMM_SELF;
- if (const parallel::Triangulation<dim> *ptria =
- dynamic_cast<const parallel::Triangulation<dim> *>(
+ if (const parallel::TriangulationBase<dim> *ptria =
+ dynamic_cast<const parallel::TriangulationBase<dim> *>(
&triangulation))
comm = ptria->get_communicator();
// set variables that are independent of FE
if (Utilities::MPI::job_supports_mpi() == true)
{
- const parallel::Triangulation<dim> *dist_tria =
- dynamic_cast<const parallel::Triangulation<dim> *>(
+ const parallel::TriangulationBase<dim> *dist_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim> *>(
&(dof_handler[0]->get_triangulation()));
task_info.communicator = dist_tria != nullptr ?
dist_tria->get_communicator() :
// set variables that are independent of FE
if (Utilities::MPI::job_supports_mpi() == true)
{
- const parallel::Triangulation<dim> *dist_tria =
- dynamic_cast<const parallel::Triangulation<dim> *>(
+ const parallel::TriangulationBase<dim> *dist_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim> *>(
&(dof_handler[0]->get_triangulation()));
task_info.communicator = dist_tria != nullptr ?
dist_tria->get_communicator() :
end_cell = tria.end(0);
// For serial Triangulations always take all cells
const unsigned int subdomain_id =
- (dynamic_cast<const parallel::Triangulation<dim> *>(
+ (dynamic_cast<const parallel::TriangulationBase<dim> *>(
&dof_handler[0]->get_triangulation()) != nullptr) ?
my_pid :
numbers::invalid_subdomain_id;
end_cell = dof_handler[0]->end(0);
// For serial Triangulations always take all cells
const unsigned int subdomain_id =
- (dynamic_cast<const parallel::Triangulation<dim> *>(
+ (dynamic_cast<const parallel::TriangulationBase<dim> *>(
&dof_handler[0]->get_triangulation()) != nullptr) ?
my_pid :
numbers::invalid_subdomain_id;
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
*dist_tria = dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
*dist_tria = dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
*dist_tria = dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
*dist_tria = dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
const SparsityPatternType &sp,
const DoFHandlerType & dh)
{
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
*dist_tria = dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
const std::vector<unsigned int> &,
MGLevelObject<TrilinosWrappers::MPI::Vector> &v)
{
- const dealii::parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const dealii::parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof.get_triangulation()));
AssertThrow(
tria != nullptr,
const std::vector<unsigned int> &,
MGLevelObject<PETScWrappers::MPI::Vector> &v)
{
- const dealii::parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const dealii::parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof.get_triangulation()));
AssertThrow(
tria != nullptr,
dst[level].reinit(ghosted_level_vector[level], false);
else
{
- const parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof_handler.get_triangulation()));
dst[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
tria != nullptr ? tria->get_communicator() :
ExcDimensionMismatch(
max_level, dof_handler.get_triangulation().n_global_levels() - 1));
- const parallel::Triangulation<dim, spacedim> *p_tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *p_tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_handler.get_triangulation()));
MPI_Comm mpi_communicator =
p_tria != nullptr ? p_tria->get_communicator() : MPI_COMM_SELF;
// dst == defect level block vector. At first run this vector is not
// initialized. Do this below:
{
- const parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&(mg_dof[0]->get_triangulation())));
for (unsigned int i = 1; i < n_blocks; ++i)
- AssertThrow((dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
- &(mg_dof[0]->get_triangulation())) == tria),
- ExcMessage("The DoFHandler use different Triangulations!"));
+ AssertThrow(
+   (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+      &(mg_dof[i]->get_triangulation())) == tria),
+   ExcMessage("The DoFHandlers use different Triangulations!"));
MGLevelObject<bool> do_reinit;
do_reinit.resize(min_level, max_level);
* VectorTools::integrate_difference() and you normally want to supply the
* same value for @p norm as you used in VectorTools::integrate_difference().
*
- * If the given Triangulation is a parallel::Triangulation, entries
+ * If the given Triangulation is a parallel::TriangulationBase, entries
* in @p cellwise_error that do not correspond to locally owned cells are
* assumed to be 0.0 and a parallel reduction using MPI is done to compute
* the global error.
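A sketch of the intended call sequence, wrapped in a hypothetical helper (global_l2_error is illustrative, not a deal.II function); it works for serial and parallel triangulations alike:
@code
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/vector_tools.h>

using namespace dealii;

// Compute the global L2 error of a finite element solution against a
// known exact solution.
template <int dim, typename VectorType>
double
global_l2_error(const DoFHandler<dim> &dof_handler,
                const VectorType &     solution,
                const Function<dim> &  exact_solution)
{
  Vector<float> cellwise_error(
    dof_handler.get_triangulation().n_active_cells());
  VectorTools::integrate_difference(dof_handler,
                                    solution,
                                    exact_solution,
                                    cellwise_error,
                                    QGauss<dim>(dof_handler.get_fe().degree + 1),
                                    VectorTools::L2_norm);

  // Entries for cells not owned locally are ignored (treated as 0.0) and,
  // on a parallel::TriangulationBase, an MPI reduction yields the total.
  return VectorTools::compute_global_error(dof_handler.get_triangulation(),
                                           cellwise_error,
                                           VectorTools::L2_norm);
}
@endcode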
project_to_boundary_first);
else
{
- Assert((dynamic_cast<const parallel::Triangulation<dim> *>(
+ Assert((dynamic_cast<const parallel::TriangulationBase<dim> *>(
&(dof.get_triangulation())) == nullptr),
ExcNotImplemented());
do_project(mapping,
}
else
{
- Assert((dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
- &(dof.get_triangulation())) == nullptr),
- ExcNotImplemented());
+ Assert(
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &(dof.get_triangulation())) == nullptr),
+ ExcNotImplemented());
internal::do_project(mapping,
dof,
constraints,
const hp::QCollection<dim - 1> &q_boundary,
const bool project_to_boundary_first)
{
- Assert((dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ Assert((dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&(dof.get_triangulation())) == nullptr),
ExcNotImplemented());
MPI_Comm comm = MPI_COMM_SELF;
#ifdef DEAL_II_WITH_MPI
- if (const parallel::Triangulation<dim, spacedim> *ptria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(&tria))
+ if (const parallel::TriangulationBase<dim, spacedim> *ptria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &tria))
comm = ptria->get_communicator();
#endif
#ifdef DEAL_II_WITH_MPI
// if this was a distributed DoFHandler, we need to do the reduction
// over the entire domain
- if (const parallel::Triangulation<dim, spacedim> *p_triangulation =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *p_triangulation =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof.get_triangulation()))
{
// The type used to store the elements of the global vector may be a
const hp::DoFHandler<dim, spacedim> &dof_handler)
: dof_handler(&dof_handler, typeid(*this).name())
{
- triangulation = (dynamic_cast<parallel::Triangulation<dim, spacedim> *>(
+ triangulation = (dynamic_cast<parallel::TriangulationBase<dim, spacedim> *>(
const_cast<dealii::Triangulation<dim, spacedim> *>(
&(this->dof_handler->get_triangulation()))));
Assert(
triangulation != nullptr,
ExcMessage(
- "parallel::CellWeights requires a parallel::Triangulation object."));
+ "parallel::CellWeights requires a parallel::TriangulationBase object."));
}
smooth_grid,
const bool allow_artificial_cells,
const Settings settings)
- : dealii::parallel::Triangulation<dim, spacedim>(mpi_communicator,
- smooth_grid,
- false)
+ : dealii::parallel::TriangulationBase<dim, spacedim>(mpi_communicator,
+ smooth_grid,
+ false)
, settings(settings)
, allow_artificial_cells(allow_artificial_cells)
{
ExcMessage(
"Cannot use this function on parallel::distributed::Triangulation."));
- dealii::parallel::Triangulation<dim, spacedim>::copy_triangulation(
+ dealii::parallel::TriangulationBase<dim, spacedim>::copy_triangulation(
other_tria);
partition();
this->update_number_cache();
void
Triangulation<dim, spacedim>::update_number_cache()
{
- parallel::Triangulation<dim, spacedim>::update_number_cache();
+ parallel::TriangulationBase<dim, spacedim>::update_number_cache();
if (settings & construct_multigrid_hierarchy)
- parallel::Triangulation<dim, spacedim>::fill_level_ghost_owners();
+ parallel::TriangulationBase<dim, spacedim>::fill_level_ghost_owners();
}
} // namespace shared
} // namespace parallel
: // Do not check for distorted cells.
// For multigrid, we need limit_level_difference_at_vertices
// to make sure the transfer operators only need to consider two levels.
- dealii::parallel::Triangulation<dim, spacedim>(
+ dealii::parallel::TriangulationBase<dim, spacedim>(
mpi_communicator,
(settings_ & construct_multigrid_hierarchy) ?
static_cast<
void
Triangulation<dim, spacedim>::update_number_cache()
{
- parallel::Triangulation<dim, spacedim>::update_number_cache();
+ parallel::TriangulationBase<dim, spacedim>::update_number_cache();
if (settings & construct_multigrid_hierarchy)
- parallel::Triangulation<dim, spacedim>::fill_level_ghost_owners();
+ parallel::TriangulationBase<dim, spacedim>::fill_level_ghost_owners();
}
Triangulation<dim, spacedim>::memory_consumption() const
{
std::size_t mem =
- this->dealii::parallel::Triangulation<dim,
- spacedim>::memory_consumption() +
+ this->dealii::parallel::TriangulationBase<dim, spacedim>::
+ memory_consumption() +
MemoryConsumption::memory_consumption(triangulation_has_content) +
MemoryConsumption::memory_consumption(connectivity) +
MemoryConsumption::memory_consumption(parallel_forest) +
{
try
{
- dealii::parallel::Triangulation<dim, spacedim>::copy_triangulation(
- other_tria);
+ dealii::parallel::TriangulationBase<dim, spacedim>::
+ copy_triangulation(other_tria);
}
catch (
const typename dealii::Triangulation<dim, spacedim>::DistortedCellList
const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
smooth_grid,
const Settings /*settings*/)
- : dealii::parallel::Triangulation<1, spacedim>(mpi_communicator,
- smooth_grid,
- false)
+ : dealii::parallel::TriangulationBase<1, spacedim>(mpi_communicator,
+ smooth_grid,
+ false)
{
Assert(false, ExcNotImplemented());
}
namespace parallel
{
template <int dim, int spacedim>
- Triangulation<dim, spacedim>::Triangulation(
+ TriangulationBase<dim, spacedim>::TriangulationBase(
MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
#ifndef DEAL_II_WITH_MPI
Assert(false,
ExcMessage("You compiled deal.II without MPI support, for "
- "which parallel::Triangulation is not available."));
+ "which parallel::TriangulationBase is not available."));
#endif
number_cache.n_locally_owned_active_cells.resize(n_subdomains);
}
template <int dim, int spacedim>
void
- Triangulation<dim, spacedim>::copy_triangulation(
+ TriangulationBase<dim, spacedim>::copy_triangulation(
const dealii::Triangulation<dim, spacedim> &other_tria)
{
#ifndef DEAL_II_WITH_MPI
(void)other_tria;
Assert(false,
ExcMessage("You compiled deal.II without MPI support, for "
- "which parallel::Triangulation is not available."));
+ "which parallel::TriangulationBase is not available."));
#else
dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);
- if (const dealii::parallel::Triangulation<dim, spacedim> *other_tria_x =
- dynamic_cast<const dealii::parallel::Triangulation<dim, spacedim> *>(
- &other_tria))
+ if (const dealii::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
+ dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
+ *>(&other_tria))
{
mpi_communicator = other_tria_x->get_communicator();
template <int dim, int spacedim>
std::size_t
- Triangulation<dim, spacedim>::memory_consumption() const
+ TriangulationBase<dim, spacedim>::memory_consumption() const
{
std::size_t mem =
this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
}
template <int dim, int spacedim>
- Triangulation<dim, spacedim>::~Triangulation()
+ TriangulationBase<dim, spacedim>::~TriangulationBase()
{
// release unused vector memory because the vector layout is going to look
// very different now
}
template <int dim, int spacedim>
- Triangulation<dim, spacedim>::NumberCache::NumberCache()
+ TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
: n_global_active_cells(0)
, n_global_levels(0)
{}
template <int dim, int spacedim>
unsigned int
- Triangulation<dim, spacedim>::n_locally_owned_active_cells() const
+ TriangulationBase<dim, spacedim>::n_locally_owned_active_cells() const
{
return number_cache.n_locally_owned_active_cells[my_subdomain];
}
template <int dim, int spacedim>
unsigned int
- Triangulation<dim, spacedim>::n_global_levels() const
+ TriangulationBase<dim, spacedim>::n_global_levels() const
{
return number_cache.n_global_levels;
}
template <int dim, int spacedim>
types::global_dof_index
- Triangulation<dim, spacedim>::n_global_active_cells() const
+ TriangulationBase<dim, spacedim>::n_global_active_cells() const
{
return number_cache.n_global_active_cells;
}
template <int dim, int spacedim>
const std::vector<unsigned int> &
- Triangulation<dim, spacedim>::n_locally_owned_active_cells_per_processor()
+ TriangulationBase<dim, spacedim>::n_locally_owned_active_cells_per_processor()
const
{
return number_cache.n_locally_owned_active_cells;
template <int dim, int spacedim>
MPI_Comm
- Triangulation<dim, spacedim>::get_communicator() const
+ TriangulationBase<dim, spacedim>::get_communicator() const
{
return mpi_communicator;
}
#ifdef DEAL_II_WITH_MPI
template <int dim, int spacedim>
void
- Triangulation<dim, spacedim>::update_number_cache()
+ TriangulationBase<dim, spacedim>::update_number_cache()
{
Assert(number_cache.n_locally_owned_active_cells.size() ==
Utilities::MPI::n_mpi_processes(this->mpi_communicator),
template <int dim, int spacedim>
void
- Triangulation<dim, spacedim>::fill_level_ghost_owners()
+ TriangulationBase<dim, spacedim>::fill_level_ghost_owners()
{
number_cache.level_ghost_owners.clear();
template <int dim, int spacedim>
void
- Triangulation<dim, spacedim>::update_number_cache()
+ TriangulationBase<dim, spacedim>::update_number_cache()
{
Assert(false, ExcNotImplemented());
}
template <int dim, int spacedim>
void
- Triangulation<dim, spacedim>::fill_level_ghost_owners()
+ TriangulationBase<dim, spacedim>::fill_level_ghost_owners()
{
Assert(false, ExcNotImplemented());
}
template <int dim, int spacedim>
types::subdomain_id
- Triangulation<dim, spacedim>::locally_owned_subdomain() const
+ TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
{
return my_subdomain;
}
template <int dim, int spacedim>
const std::set<types::subdomain_id> &
- Triangulation<dim, spacedim>::ghost_owners() const
+ TriangulationBase<dim, spacedim>::ghost_owners() const
{
return number_cache.ghost_owners;
}
template <int dim, int spacedim>
const std::set<types::subdomain_id> &
- Triangulation<dim, spacedim>::level_ghost_owners() const
+ TriangulationBase<dim, spacedim>::level_ghost_owners() const
{
return number_cache.level_ghost_owners;
}
template <int dim, int spacedim>
std::map<unsigned int, std::set<dealii::types::subdomain_id>>
- Triangulation<dim, spacedim>::compute_vertices_with_ghost_neighbors() const
+ TriangulationBase<dim, spacedim>::compute_vertices_with_ghost_neighbors()
+ const
{
// TODO: we are not treating periodic neighbors correctly here. If we do
// we can remove the overriding implementation for p::d::Triangulation
{
namespace parallel
\{
- template class Triangulation<deal_II_dimension>;
+ template class TriangulationBase<deal_II_dimension>;
#if deal_II_dimension < 3
- template class Triangulation<deal_II_dimension, deal_II_dimension + 1>;
+ template class TriangulationBase<deal_II_dimension,
+ deal_II_dimension + 1>;
#endif
#if deal_II_dimension < 2
- template class Triangulation<deal_II_dimension, deal_II_dimension + 2>;
+ template class TriangulationBase<deal_II_dimension,
+ deal_II_dimension + 2>;
#endif
\}
}
constexpr int dim = DoFHandlerType::dimension;
constexpr int spacedim = DoFHandlerType::space_dimension;
- const parallel::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&this->dof_handler->get_triangulation()));
Assert(tr != nullptr, ExcInternalError());
const unsigned int n_buckets = fe_collection.n_components();
std::vector<types::global_dof_index> shifts(n_buckets);
- if (const parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&start->get_dof_handler().get_triangulation())))
{
#ifdef DEAL_II_WITH_MPI
const unsigned int n_buckets = fe_collection.n_blocks();
std::vector<types::global_dof_index> shifts(n_buckets);
- if (const parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&start->get_dof_handler().get_triangulation())))
{
#ifdef DEAL_II_WITH_MPI
// DoFs for all previous processes
types::global_dof_index my_starting_index = 0;
- if (const parallel::Triangulation<dim, spacedim> *tria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_handler.get_triangulation()))
{
#ifdef DEAL_II_WITH_MPI
const typename std::vector<typename DoFHandlerType::active_cell_iterator>
&cells)
{
- if (const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *p =
- dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &dof.get_triangulation()))
+ if (const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *p =
+ dynamic_cast<const parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(&dof.get_triangulation()))
{
AssertDimension(cells.size(), p->n_locally_owned_active_cells());
}
const Tensor<1, DoFHandlerType::space_dimension> &direction,
const bool dof_wise_renumbering)
{
- Assert((dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &dof.get_triangulation()) == nullptr),
- ExcNotImplemented());
+ Assert(
+ (dynamic_cast<
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
+ &dof.get_triangulation()) == nullptr),
+ ExcNotImplemented());
if (dof_wise_renumbering == false)
{
{
Assert(
(!dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&dof_handler.get_triangulation())),
ExcMessage(
"Parallel triangulations are already enumerated according to their MPI process id."));
// collect information for each subdomain index anyway, not just for the
// used one.)
const unsigned int n_subdomains =
- (dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ (dynamic_cast<const parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&dof_handler.get_triangulation()) == nullptr ?
[&dof_handler]() {
unsigned int max_subdomain_id = 0;
return max_subdomain_id + 1;
}() :
Utilities::MPI::n_mpi_processes(
- dynamic_cast<
- const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
+ dynamic_cast<const parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
&dof_handler.get_triangulation())
->get_communicator()));
Assert(n_subdomains > *std::max_element(subdomain_association.begin(),
const unsigned int dim = DoFHandlerType::dimension;
const unsigned int spacedim = DoFHandlerType::space_dimension;
- if (const parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_handler.get_triangulation())))
{
std::vector<types::global_dof_index> local_dof_count =
#ifdef DEAL_II_WITH_MPI
// if we are working on a parallel mesh, we now need to collect
// this information from all processors
- if (const parallel::Triangulation<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
- *tria = (dynamic_cast<const parallel::Triangulation<
+ if (const parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *tria = (dynamic_cast<const parallel::TriangulationBase<
DoFHandlerType::dimension,
DoFHandlerType::space_dimension> *>(
&dof_handler.get_triangulation())))
MPI_Comm communicator = MPI_COMM_SELF;
try
{
- const typename dealii::parallel::Triangulation<dim, spacedim>
+ const typename dealii::parallel::TriangulationBase<dim,
+ spacedim>
&tria = dynamic_cast<const typename dealii::parallel::
- Triangulation<dim, spacedim> &>(
+ TriangulationBase<dim, spacedim> &>(
coarse_to_fine_grid_map.get_destination_grid()
.get_triangulation());
communicator = tria.get_communicator();
// and dofs on a cell owned by a different processor.
constexpr int dim = DoFHandlerType::dimension;
constexpr int spacedim = DoFHandlerType::space_dimension;
- if (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_row.get_triangulation()) != nullptr ||
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_col.get_triangulation()) != nullptr)
{
Assert(&dof_row.get_triangulation() == &dof_col.get_triangulation(),
// if not, just create a .vtu file with no reference
// to the processor number
std::string new_file = filename_without_extension + ".vtu";
- if (const parallel::Triangulation<dim, spacedim> *tr =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(&tria))
+ if (const parallel::TriangulationBase<dim, spacedim> *tr =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(&tria))
{
new_file = filename_without_extension + ".proc" +
Utilities::int_to_string(tr->locally_owned_subdomain(), 4) +
double global_volume = 0;
#ifdef DEAL_II_WITH_MPI
- if (const parallel::Triangulation<dim, spacedim> *p_tria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *p_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&triangulation))
global_volume =
Utilities::MPI::sum(local_volume, p_tria->get_communicator());
double global_min_diameter = 0;
#ifdef DEAL_II_WITH_MPI
- if (const parallel::Triangulation<dim, spacedim> *p_tria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *p_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&triangulation))
global_min_diameter =
Utilities::MPI::min(min_diameter, p_tria->get_communicator());
double global_max_diameter = 0;
#ifdef DEAL_II_WITH_MPI
- if (const parallel::Triangulation<dim, spacedim> *p_tria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *p_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&triangulation))
global_max_diameter =
Utilities::MPI::max(max_diameter, p_tria->get_communicator());
#else
// Recovering the mpi communicator used to create the triangulation
const auto &tria_mpi =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&cache.get_triangulation());
// If the dynamic cast failed we can't recover the mpi communicator:
// throwing an assertion error
std::vector<BoundingBox<spacedim>> bbox_v(1, bbox);
if (const auto tria_mpi =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&(*tria)))
{
covering_rtree = GridTools::build_global_description_tree(
else
{
// a sequential triangulation. there is nothing we need to do here
- Assert((dynamic_cast<
- const dealii::parallel::Triangulation<dim, spacedim> *>(
- &dof_handler.get_triangulation()) == nullptr),
- ExcInternalError());
+ Assert(
+ (dynamic_cast<
+ const dealii::parallel::TriangulationBase<dim, spacedim> *>(
+ &dof_handler.get_triangulation()) == nullptr),
+ ExcInternalError());
}
}
}
}
- if (const parallel::Triangulation<dim, spacedim> *parallel_tria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *parallel_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_handler.get_triangulation()))
{
max_smoothness_refine =
// now do a global reduction over all processors to see what operation
// they can agree upon
- if (const parallel::Triangulation<dim, spacedim> *ptria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ if (const parallel::TriangulationBase<dim, spacedim> *ptria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof.get_triangulation()))
perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
ptria->get_communicator()) == 1);
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
{
- const parallel::Triangulation<dim, spacedim> *ptria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *ptria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof.get_triangulation());
const MPI_Comm mpi_communicator =
ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;
unsigned int global_min = min_level;
// If necessary, communicate to find minimum
// level for an active cell over all subdomains
- if (const parallel::Triangulation<dim, spacedim> *tr =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(&tria))
+ if (const parallel::TriangulationBase<dim, spacedim> *tr =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &tria))
global_min = Utilities::MPI::min(min_level, tr->get_communicator());
AssertIndexRange(global_min, tria.n_global_levels());
// It is only necessary to calculate the imbalance
// on a distributed mesh. The imbalance is always
// 1.0 for the serial case.
- if (const parallel::Triangulation<dim, spacedim> *tr =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(&tria))
+ if (const parallel::TriangulationBase<dim, spacedim> *tr =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &tria))
{
const unsigned int n_proc =
Utilities::MPI::n_mpi_processes(tr->get_communicator());
}
}
- const dealii::parallel::Triangulation<dim, spacedim> *tria =
- (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const dealii::parallel::TriangulationBase<dim, spacedim> *tria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof.get_triangulation()));
AssertThrow(
send_data_temp.size() == 0 || tria != nullptr,
}
// step 2.7: Initialize the ghosted vector
- const parallel::Triangulation<dim, dim> *ptria =
- (dynamic_cast<const parallel::Triangulation<dim, dim> *>(&tria));
+ const parallel::TriangulationBase<dim, dim> *ptria =
+ (dynamic_cast<const parallel::TriangulationBase<dim, dim> *>(
+ &tria));
const MPI_Comm communicator =
ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;
// be manually distributed.
// Retrieve communicator from triangulation if it is parallel
- const parallel::Triangulation<dim, spacedim> *dist_tria =
- dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ const parallel::TriangulationBase<dim, spacedim> *dist_tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&(mg_dof.get_triangulation()));
MPI_Comm communicator = dist_tria != nullptr ?
ExcNotImplemented());
const bool tria_is_parallel =
- (dynamic_cast<const parallel::Triangulation<dim1, spacedim> *>(
+ (dynamic_cast<const parallel::TriangulationBase<dim1, spacedim> *>(
&space_dh.get_triangulation()) != nullptr);
const auto &space_fe = space_dh.get_fe();
const auto &immersed_fe = immersed_dh.get_fe();
ExcNotImplemented());
const bool tria_is_parallel =
- (dynamic_cast<const parallel::Triangulation<dim1, spacedim> *>(
+ (dynamic_cast<const parallel::TriangulationBase<dim1, spacedim> *>(
&space_dh.get_triangulation()) != nullptr);
const auto &space_fe = space_dh.get_fe();