#define dealii_mg_transfer_global_coarsening_h
#include <deal.II/base/mg_level_object.h>
+#include <deal.II/base/mpi_remote_point_evaluation.h>
#include <deal.II/base/vectorization.h>
#include <deal.II/dofs/dof_handler.h>
+/**
+ * Abstract base class of the two-level transfer operators, defining the
+ * vector-type-independent interface used by MGTransferGlobalCoarsening.
+ */
+template <typename VectorType>
+class MGTwoLevelTransferBase : public Subscriptor
+{
+public:
+ /**
+ * Perform prolongation.
+ */
+ virtual void
+ prolongate_and_add(VectorType &dst, const VectorType &src) const = 0;
+
+ /**
+ * Perform restriction.
+ */
+ virtual void
+ restrict_and_add(VectorType &dst, const VectorType &src) const = 0;
+
+ /**
+ * Perform interpolation of a solution vector from the fine level to the
+ * coarse level. This function is different from restriction, where a
+ * weighted residual is transferred to a coarser level (transposition of
+ * prolongation matrix).
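+ *
+ * Schematically, in terms of the prolongation matrix @f$P@f$ (notation used
+ * here only for illustration), prolongate_and_add() computes
+ * @f$\mathrm{dst} \leftarrow \mathrm{dst} + P\,\mathrm{src}@f$ and
+ * restrict_and_add() computes
+ * @f$\mathrm{dst} \leftarrow \mathrm{dst} + P^T\,\mathrm{src}@f$, whereas
+ * interpolate() transfers the solution values themselves from the fine to
+ * the coarse space.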
+ */
+ virtual void
+ interpolate(VectorType &dst, const VectorType &src) const = 0;
+
+ /**
+ * Enable inplace vector operations if external and internal vectors
+ * are compatible.
+ */
+ virtual void
+ enable_inplace_operations_if_possible(
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &partitioner_coarse,
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &partitioner_fine) = 0;
+
+ /**
+ * Return the memory consumption of the allocated memory in this class.
+ */
+ virtual std::size_t
+ memory_consumption() const = 0;
+};
+
+
+
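+/**
+ * Specialization of MGTwoLevelTransferBase for vectors of type
+ * LinearAlgebra::distributed::Vector. It implements prolongate_and_add()
+ * and restrict_and_add() once and for all: the ghost-value update, the
+ * optional internal vectors vec_coarse/vec_fine, and the final compress()
+ * are handled here, while the cell-local work is delegated to the protected
+ * hooks prolongate_and_add_internal() and restrict_and_add_internal()
+ * implemented by derived classes.
+ *
+ * Purely as an illustration (MyTransfer is a hypothetical user class, not
+ * part of the library), a derived class would provide these hooks as
+ * follows:
+ * @code
+ * template <typename Number>
+ * class MyTransfer
+ *   : public MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>
+ * {
+ * protected:
+ *   // cell-local part of prolongation: dst (fine) += P * src (coarse)
+ *   void
+ *   prolongate_and_add_internal(
+ *     LinearAlgebra::distributed::Vector<Number>       &dst,
+ *     const LinearAlgebra::distributed::Vector<Number> &src) const override;
+ *
+ *   // cell-local part of restriction: dst (coarse) += P^T * src (fine)
+ *   void
+ *   restrict_and_add_internal(
+ *     LinearAlgebra::distributed::Vector<Number>       &dst,
+ *     const LinearAlgebra::distributed::Vector<Number> &src) const override;
+ *
+ *   // interpolate(), enable_inplace_operations_if_possible(), and
+ *   // memory_consumption() remain pure virtual and need to be implemented
+ *   // as well
+ * };
+ * @endcode
+ */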
+template <typename Number>
+class MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>
+ : public Subscriptor
+{
+public:
+ using VectorType = LinearAlgebra::distributed::Vector<Number>;
+
+ /**
+ * Perform prolongation.
+ */
+ virtual void
+ prolongate_and_add(VectorType &dst, const VectorType &src) const;
+
+ /**
+ * Perform restriction.
+ */
+ virtual void
+ restrict_and_add(VectorType &dst, const VectorType &src) const;
+
+ /**
+ * Perform interpolation of a solution vector from the fine level to the
+ * coarse level. This function is different from restriction, where a
+ * weighted residual is transferred to a coarser level (transposition of
+ * prolongation matrix).
+ */
+ virtual void
+ interpolate(VectorType &dst, const VectorType &src) const = 0;
+
+ /**
+ * Enable inplace vector operations if external and internal vectors
+ * are compatible.
+ */
+ virtual void
+ enable_inplace_operations_if_possible(
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &partitioner_coarse,
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &partitioner_fine) = 0;
+
+ /**
+ * Return the memory consumption of the allocated memory in this class.
+ */
+ virtual std::size_t
+ memory_consumption() const = 0;
+
+protected:
+ /**
+ * Perform prolongation.
+ */
+ virtual void
+ prolongate_and_add_internal(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const = 0;
+
+ /**
+ * Perform restriction.
+ */
+ virtual void
+ restrict_and_add_internal(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const = 0;
+
+ /**
+ * Update the ghost values of @p vec, using the embedded partitioner if one
+ * has been set up, and the plain vector operation otherwise.
+ */
+ void
+ update_ghost_values(
+ const LinearAlgebra::distributed::Vector<Number> &vec) const;
+
+ /**
+ * Perform the compress() operation on @p vec with the given @p op, using
+ * the embedded partitioner if one has been set up, and the plain vector
+ * operation otherwise.
+ */
+ void
+ compress(LinearAlgebra::distributed::Vector<Number> &vec,
+ const VectorOperation::values op) const;
+
+ /**
+ * Zero out the ghost values of @p vec, using the embedded partitioner if
+ * one has been set up, and the plain vector operation otherwise.
+ */
+ void
+ zero_out_ghost_values(
+ const LinearAlgebra::distributed::Vector<Number> &vec) const;
+
+ /**
+ * Enable inplace vector operations if external and internal vectors
+ * are compatible.
+ */
+ template <typename ConstraintInfo>
+ void
+ internal_enable_inplace_operations_if_possible(
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &partitioner_coarse,
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_fine,
+ ConstraintInfo & constraint_info);
+
+ /**
+ * Flag if the finite elements on the fine cells are continuous. If yes,
+ * the multiplicity of DoFs sharing a vertex/line as well as constraints have
+ * to be taken into account via weights.
+ */
+ bool fine_element_is_continuous;
+
+ /**
+ * Partitioner needed by the intermediate coarse-level vector (vec_coarse).
+ */
+ std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_coarse;
+
+ /**
+ * Partitioner needed by the intermediate fine-level vector (vec_fine).
+ */
+ std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_fine;
+
+ /**
+ * Internal vector needed for collecting all degrees of freedom of the fine
+ * cells. It is only initialized if the fine-level DoF indices touch DoFs
+ * other than the locally active ones (which we always assume can be
+ * accessed by the given vectors in the prolongate/restrict functions),
+ * otherwise it is left at size zero.
+ */
+ mutable LinearAlgebra::distributed::Vector<Number> vec_fine;
+
+ /**
+ * Internal vector on which the actual prolongation/restriction is performed.
+ */
+ mutable LinearAlgebra::distributed::Vector<Number> vec_coarse;
+
+ /**
+ * Embedded partitioner for efficient communication if locally relevant DoFs
+ * are a subset of an external Partitioner object.
+ */
+ std::shared_ptr<const Utilities::MPI::Partitioner>
+ partitioner_coarse_embedded;
+
+ /**
+ * Embedded partitioner for efficient communication if locally relevant DoFs
+ * are a subset of an external Partitioner object.
+ */
+ std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_fine_embedded;
+
+ /**
+ * Buffer for efficient communication if locally relevant DoFs
+ * are a subset of an external Partitioner object.
+ */
+ mutable AlignedVector<Number> buffer_coarse_embedded;
+
+ /**
+ * Buffer for efficient communication if locally relevant DoFs
+ * are a subset of an external Partitioner object.
+ */
+ mutable AlignedVector<Number> buffer_fine_embedded;
+
+ /**
+ * DoF indices of the fine cells, expressed in indices local to the MPI
+ * rank.
+ */
+ std::vector<unsigned int> level_dof_indices_fine;
+};
+
+
+
/**
* Class for transfer between two multigrid levels for p- or global coarsening.
*
* The implementation of this class is explained in detail in @cite munch2022gc.
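+ *
+ * For illustration only (dof_handler_fine, dof_handler_coarse,
+ * constraints_fine, constraints_coarse, vec_fine, and vec_coarse are
+ * placeholders for application-provided objects), a geometric two-level
+ * transfer could be set up and used as follows:
+ * @code
+ * MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<double>>
+ *   transfer;
+ * transfer.reinit_geometric_transfer(dof_handler_fine,
+ *                                    dof_handler_coarse,
+ *                                    constraints_fine,
+ *                                    constraints_coarse);
+ *
+ * // vec_fine and vec_coarse are initialized according to the fine and
+ * // coarse DoFHandler, respectively
+ * transfer.prolongate_and_add(vec_fine, vec_coarse);
+ * transfer.restrict_and_add(vec_coarse, vec_fine);
+ * @endcode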
*/
template <int dim, typename VectorType>
-class MGTwoLevelTransfer
+class MGTwoLevelTransfer : public MGTwoLevelTransferBase<VectorType>
{
public:
/**
* Perform prolongation.
*/
void
- prolongate_and_add(VectorType &dst, const VectorType &src) const;
+ prolongate_and_add(VectorType &dst, const VectorType &src) const override;
/**
* Perform restriction.
*/
void
- restrict_and_add(VectorType &dst, const VectorType &src) const;
+ restrict_and_add(VectorType &dst, const VectorType &src) const override;
/**
* Perform interpolation of a solution vector from the fine level to the
* prolongation matrix).
*/
void
- interpolate(VectorType &dst, const VectorType &src) const;
+ interpolate(VectorType &dst, const VectorType &src) const override;
/**
* Enable inplace vector operations if external and internal vectors
enable_inplace_operations_if_possible(
const std::shared_ptr<const Utilities::MPI::Partitioner>
&partitioner_coarse,
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_fine);
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_fine)
+ override;
/**
* Return the memory consumption of the allocated memory in this class.
*/
std::size_t
- memory_consumption() const;
+ memory_consumption() const override;
};
*/
template <int dim, typename Number>
class MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>
+ : public MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>
{
public:
/**
fast_polynomial_transfer_supported(const unsigned int fe_degree_fine,
const unsigned int fe_degree_coarse);
- /**
- * Perform prolongation.
- */
- void
- prolongate_and_add(
- LinearAlgebra::distributed::Vector<Number> & dst,
- const LinearAlgebra::distributed::Vector<Number> &src) const;
-
- /**
- * Perform restriction.
- */
- void
- restrict_and_add(LinearAlgebra::distributed::Vector<Number> & dst,
- const LinearAlgebra::distributed::Vector<Number> &src) const;
-
/**
* Perform interpolation of a solution vector from the fine level to the
* coarse level. This function is different from restriction, where a
* prolongation matrix).
*/
void
- interpolate(LinearAlgebra::distributed::Vector<Number> & dst,
- const LinearAlgebra::distributed::Vector<Number> &src) const;
+ interpolate(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const override;
/**
* Enable inplace vector operations if external and internal vectors
enable_inplace_operations_if_possible(
const std::shared_ptr<const Utilities::MPI::Partitioner>
&partitioner_coarse,
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_fine);
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_fine)
+ override;
/**
* Return the memory consumption of the allocated memory in this class.
*/
std::size_t
- memory_consumption() const;
-
-private:
- void
- update_ghost_values(const LinearAlgebra::distributed::Vector<Number> &) const;
+ memory_consumption() const override;
+protected:
void
- compress(LinearAlgebra::distributed::Vector<Number> &,
- const VectorOperation::values) const;
+ prolongate_and_add_internal(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const override;
void
- zero_out_ghost_values(
- const LinearAlgebra::distributed::Vector<Number> &) const;
+ restrict_and_add_internal(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const override;
+private:
/**
 * A multigrid transfer scheme. A multigrid transfer class can have different
* transfer schemes to enable p-adaptivity (one transfer scheme per
*/
std::vector<MGTransferScheme> schemes;
- /**
- * Flag if the finite elements on the fine cells are continuous. If yes,
- * the multiplicity of DoF sharing a vertex/line as well as constraints have
- * to be taken into account via weights.
- */
- bool fine_element_is_continuous;
-
- /**
- * Partitioner needed by the intermediate vector.
- */
- std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_fine;
-
- /**
- * Embedded partitioner for efficient communication if locally relevant DoFs
- * are a subset of an external Partitioner object.
- */
- std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_fine_embedded;
-
- /**
- * Buffer for efficient communication if locally relevant DoFs
- * are a subset of an external Partitioner object.
- */
- mutable AlignedVector<Number> buffer_fine_embedded;
-
- /**
- * Partitioner needed by the intermediate vector.
- */
- std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_coarse;
-
- /**
- * Embedded partitioner for efficient communication if locally relevant DoFs
- * are a subset of an external Partitioner object.
- */
- std::shared_ptr<const Utilities::MPI::Partitioner>
- partitioner_coarse_embedded;
-
- /**
- * Buffer for efficient communication if locally relevant DoFs
- * are a subset of an external Partitioner object.
- */
- mutable AlignedVector<Number> buffer_coarse_embedded;
-
- /**
- * Internal vector needed for collecting all degrees of freedom of the fine
- * cells. It is only initialized if the fine-level DoF indices touch DoFs
- * other than the locally active ones (which we always assume can be
- * accessed by the given vectors in the prolongate/restrict functions),
- * otherwise it is left at size zero.
- */
- mutable LinearAlgebra::distributed::Vector<Number> vec_fine;
-
- /**
- * Internal vector on that the actual prolongation/restriction is performed.
- */
- mutable LinearAlgebra::distributed::Vector<Number> vec_coarse;
-
/**
* Helper class for reading from and writing to global vectors and for
* applying constraints.
*/
AlignedVector<VectorizedArray<Number>> weights_compressed;
- /**
- * DoF indices of the fine cells, expressed in indices local to the MPI
- * rank.
- */
- std::vector<unsigned int> level_dof_indices_fine;
-
/**
* Number of components.
*/
* internal level vectors within the function call copy_to_mg() if used in the
* context of PreconditionMG.
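+ *
+ * A schematic example (dof_handlers, constraints, min_level, and max_level
+ * are placeholders for per-level objects provided by the application):
+ * @code
+ * MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers(min_level,
+ *                                                              max_level);
+ * for (unsigned int l = min_level + 1; l <= max_level; ++l)
+ *   transfers[l].reinit_geometric_transfer(dof_handlers[l],
+ *                                          dof_handlers[l - 1],
+ *                                          constraints[l],
+ *                                          constraints[l - 1]);
+ *
+ * MGTransferGlobalCoarsening<dim, VectorType> transfer(
+ *   transfers, [&](const unsigned int level, VectorType &vec) {
+ *     // initialize vec with the layout of the level DoFHandler, e.g., via
+ *     // MatrixFree::initialize_dof_vector() of the level operator
+ *   });
+ * @endcode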
*/
+ template <typename T>
MGTransferGlobalCoarsening(
- const MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> &transfer,
+ const MGLevelObject<T> &transfer,
const std::function<void(const unsigned int, VectorType &)>
&initialize_dof_vector = {});
/**
* Collection of the two-level transfer operators.
*/
- MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfer;
+ MGLevelObject<SmartPointer<MGTwoLevelTransferBase<VectorType>>> transfer;
/**
* External partitioners used during initialize_dof_vector().
template <int dim, typename VectorType>
+template <typename T>
MGTransferGlobalCoarsening<dim, VectorType>::MGTransferGlobalCoarsening(
- const MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> &transfer,
+ const MGLevelObject<T> &transfer,
const std::function<void(const unsigned int, VectorType &)>
&initialize_dof_vector)
- : transfer(transfer)
{
+ const unsigned int min_level = transfer.min_level();
+ const unsigned int max_level = transfer.max_level();
+
+ this->transfer.resize(min_level, max_level);
+
+ for (unsigned int l = min_level; l <= max_level; ++l)
+ this->transfer[l] = &const_cast<MGTwoLevelTransferBase<VectorType> &>(
+ static_cast<const MGTwoLevelTransferBase<VectorType> &>(
+ Utilities::get_underlying_value(transfer[l])));
+
this->build(initialize_dof_vector);
}
AssertDimension(this->external_partitioners.size(), transfer.n_levels());
for (unsigned int l = min_level + 1; l <= max_level; ++l)
- transfer[l].enable_inplace_operations_if_possible(
+ transfer[l]->enable_inplace_operations_if_possible(
this->external_partitioners[l - 1 - min_level],
this->external_partitioners[l - min_level]);
}
VectorType & dst,
const VectorType & src) const
{
- this->transfer[to_level].prolongate_and_add(dst, src);
+ this->transfer[to_level]->prolongate_and_add(dst, src);
}
VectorType & dst,
const VectorType & src) const
{
- this->transfer[from_level].restrict_and_add(dst, src);
+ this->transfer[from_level]->restrict_and_add(dst, src);
}
dst[transfer.max_level()].copy_locally_owned_data_from(src);
for (unsigned int l = max_level; l > min_level; --l)
- this->transfer[l].interpolate(dst[l - 1], dst[l]);
+ this->transfer[l]->interpolate(dst[l - 1], dst[l]);
}
const unsigned int max_level = transfer.max_level();
for (unsigned int l = min_level + 1; l <= max_level; ++l)
- size += this->transfer[l].memory_consumption();
+ size += this->transfer[l]->memory_consumption();
return size;
}
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_tools.h>
+#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/cell_id_translator.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/matrix_free/evaluation_kernels.h>
#include <deal.II/matrix_free/evaluation_template_factory.h>
+#include <deal.II/matrix_free/fe_point_evaluation.h>
#include <deal.II/matrix_free/tensor_product_kernels.h>
#include <deal.II/matrix_free/vector_access_internal.h>
-template <int dim, typename Number>
+template <typename Number>
void
-MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::
prolongate_and_add(
LinearAlgebra::distributed::Vector<Number> & dst,
const LinearAlgebra::distributed::Vector<Number> &src) const
{
- using VectorizedArrayType = VectorizedArray<Number>;
-
- const unsigned int n_lanes = VectorizedArrayType::size();
-
const bool use_dst_inplace = this->vec_fine.size() == 0;
const auto vec_fine_ptr = use_dst_inplace ? &dst : &this->vec_fine;
- Assert(vec_fine_ptr->get_partitioner().get() == partitioner_fine.get(),
+ Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(),
ExcInternalError());
const bool use_src_inplace = this->vec_coarse.size() == 0;
const auto vec_coarse_ptr = use_src_inplace ? &src : &this->vec_coarse;
- Assert(vec_coarse_ptr->get_partitioner().get() == partitioner_coarse.get(),
+ Assert(vec_coarse_ptr->get_partitioner().get() ==
+ this->partitioner_coarse.get(),
ExcInternalError());
if (use_src_inplace == false)
- vec_coarse.copy_locally_owned_data_from(src);
+ this->vec_coarse.copy_locally_owned_data_from(src);
- update_ghost_values(*vec_coarse_ptr);
+ this->update_ghost_values(*vec_coarse_ptr);
if (fine_element_is_continuous && (use_dst_inplace == false))
*vec_fine_ptr = Number(0.);
+ this->prolongate_and_add_internal(*vec_fine_ptr, *vec_coarse_ptr);
+
+ if (fine_element_is_continuous || use_dst_inplace == false)
+ this->compress(*vec_fine_ptr, VectorOperation::add);
+
+ if (use_dst_inplace == false)
+ dst += this->vec_fine;
+
+ if (use_src_inplace)
+ this->zero_out_ghost_values(*vec_coarse_ptr);
+}
+
+
+
+template <int dim, typename Number>
+void
+MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+ prolongate_and_add_internal(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
+{
+ using VectorizedArrayType = VectorizedArray<Number>;
+
+ const unsigned int n_lanes = VectorizedArrayType::size();
+
AlignedVector<VectorizedArrayType> evaluation_data_fine;
AlignedVector<VectorizedArrayType> evaluation_data_coarse;
- const unsigned int * indices_fine = level_dof_indices_fine.data();
- const Number * weights = nullptr;
+ const unsigned int *indices_fine = this->level_dof_indices_fine.data();
+ const Number * weights = nullptr;
const VectorizedArray<Number> *weights_compressed = nullptr;
- if (fine_element_is_continuous)
+ if (this->fine_element_is_continuous)
{
weights = this->weights.data();
weights_compressed = this->weights_compressed.data();
// read from src vector (similar to FEEvaluation::read_dof_values())
internal::VectorReader<Number, VectorizedArrayType> reader;
constraint_info.read_write_operation(reader,
- *vec_coarse_ptr,
+ src,
evaluation_data_coarse.data(),
cell_counter,
n_lanes_filled,
// ------------------------------ fine -------------------------------
// weight and write into dst vector
- if (fine_element_is_continuous && this->weights_compressed.size() > 0)
+ if (this->fine_element_is_continuous &&
+ this->weights_compressed.size() > 0)
{
internal::
weight_fe_q_dofs_by_entity<dim, -1, VectorizedArray<Number>>(
for (unsigned int v = 0; v < n_lanes_filled; ++v)
{
- if (fine_element_is_continuous &&
+ if (this->fine_element_is_continuous &&
this->weights_compressed.size() == 0)
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
- vec_fine_ptr->local_element(indices_fine[i]) +=
+ dst.local_element(indices_fine[i]) +=
evaluation_data_fine[i][v] * weights[i];
else
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
- vec_fine_ptr->local_element(indices_fine[i]) +=
+ dst.local_element(indices_fine[i]) +=
evaluation_data_fine[i][v];
indices_fine += scheme.n_dofs_per_cell_fine;
- if (fine_element_is_continuous)
+ if (this->fine_element_is_continuous)
weights += scheme.n_dofs_per_cell_fine;
}
}
}
-
- if (fine_element_is_continuous || use_dst_inplace == false)
- compress(*vec_fine_ptr, VectorOperation::add);
-
- if (use_dst_inplace == false)
- dst += this->vec_fine;
-
- if (use_src_inplace)
- zero_out_ghost_values(*vec_coarse_ptr);
}
-template <int dim, typename Number>
+template <typename Number>
void
-MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::
restrict_and_add(LinearAlgebra::distributed::Vector<Number> & dst,
const LinearAlgebra::distributed::Vector<Number> &src) const
{
- using VectorizedArrayType = VectorizedArray<Number>;
-
- const unsigned int n_lanes = VectorizedArrayType::size();
-
const bool use_src_inplace = this->vec_fine.size() == 0;
const auto vec_fine_ptr = use_src_inplace ? &src : &this->vec_fine;
- Assert(vec_fine_ptr->get_partitioner().get() == partitioner_fine.get(),
+ Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(),
ExcInternalError());
const bool use_dst_inplace = this->vec_coarse.size() == 0;
const auto vec_coarse_ptr = use_dst_inplace ? &dst : &this->vec_coarse;
- Assert(vec_coarse_ptr->get_partitioner().get() == partitioner_coarse.get(),
+ Assert(vec_coarse_ptr->get_partitioner().get() ==
+ this->partitioner_coarse.get(),
ExcInternalError());
if (use_src_inplace == false)
this->vec_fine.copy_locally_owned_data_from(src);
if (fine_element_is_continuous || use_src_inplace == false)
- update_ghost_values(*vec_fine_ptr);
+ this->update_ghost_values(*vec_fine_ptr);
if (use_dst_inplace == false)
*vec_coarse_ptr = 0.0;
- zero_out_ghost_values(*vec_coarse_ptr); // since we might add into the
- // ghost values and call compress
+ this->zero_out_ghost_values(
+ *vec_coarse_ptr); // since we might add into the
+ // ghost values and call compress
+
+ this->restrict_and_add_internal(*vec_coarse_ptr, *vec_fine_ptr);
+
+ // clean up related to update_ghost_values()
+ if (fine_element_is_continuous == false && use_src_inplace == false)
+ this->zero_out_ghost_values(*vec_fine_ptr); // internal vector (DG)
+ else if (fine_element_is_continuous && use_src_inplace == false)
+ vec_fine_ptr->set_ghost_state(false); // internal vector (CG)
+ else if (fine_element_is_continuous)
+ this->zero_out_ghost_values(*vec_fine_ptr); // external vector
+
+ this->compress(*vec_coarse_ptr, VectorOperation::add);
+
+ if (use_dst_inplace == false)
+ dst += this->vec_coarse;
+}
+
+
+
+template <int dim, typename Number>
+void
+MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+ restrict_and_add_internal(
+ LinearAlgebra::distributed::Vector<Number> & dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
+{
+ using VectorizedArrayType = VectorizedArray<Number>;
+
+ const unsigned int n_lanes = VectorizedArrayType::size();
AlignedVector<VectorizedArrayType> evaluation_data_fine;
AlignedVector<VectorizedArrayType> evaluation_data_coarse;
- const unsigned int * indices_fine = level_dof_indices_fine.data();
- const Number * weights = nullptr;
+ const unsigned int *indices_fine = this->level_dof_indices_fine.data();
+ const Number * weights = nullptr;
const VectorizedArray<Number> *weights_compressed = nullptr;
- if (fine_element_is_continuous)
+ if (this->fine_element_is_continuous)
{
weights = this->weights.data();
weights_compressed = this->weights_compressed.data();
// read from source vector and weight
for (unsigned int v = 0; v < n_lanes_filled; ++v)
{
- if (fine_element_is_continuous &&
+ if (this->fine_element_is_continuous &&
this->weights_compressed.size() == 0)
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
evaluation_data_fine[i][v] =
- vec_fine_ptr->local_element(indices_fine[i]) * weights[i];
+ src.local_element(indices_fine[i]) * weights[i];
else
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
evaluation_data_fine[i][v] =
- vec_fine_ptr->local_element(indices_fine[i]);
+ src.local_element(indices_fine[i]);
indices_fine += scheme.n_dofs_per_cell_fine;
- if (fine_element_is_continuous)
+ if (this->fine_element_is_continuous)
weights += scheme.n_dofs_per_cell_fine;
}
- if (fine_element_is_continuous && this->weights_compressed.size() > 0)
+ if (this->fine_element_is_continuous &&
+ this->weights_compressed.size() > 0)
{
internal::
weight_fe_q_dofs_by_entity<dim, -1, VectorizedArray<Number>>(
constraint_info.apply_hanging_node_constraints(
cell_counter, n_lanes_filled, true, evaluation_data_coarse);
constraint_info.read_write_operation(writer,
- *vec_coarse_ptr,
+ dst,
evaluation_data_coarse.data(),
cell_counter,
n_lanes_filled,
cell_counter += n_lanes_filled;
}
}
-
- // clean up related to update_ghost_values()
- if (fine_element_is_continuous == false && use_src_inplace == false)
- zero_out_ghost_values(*vec_fine_ptr); // internal vector (DG)
- else if (fine_element_is_continuous && use_src_inplace == false)
- vec_fine_ptr->set_ghost_state(false); // internal vector (CG)
- else if (fine_element_is_continuous)
- zero_out_ghost_values(*vec_fine_ptr); // external vector
-
- compress(*vec_coarse_ptr, VectorOperation::add);
-
- if (use_dst_inplace == false)
- dst += this->vec_coarse;
}
const bool use_src_inplace = this->vec_fine.size() == 0;
const auto vec_fine_ptr = use_src_inplace ? &src : &this->vec_fine;
- Assert(vec_fine_ptr->get_partitioner().get() == partitioner_fine.get(),
+ Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(),
ExcInternalError());
const bool use_dst_inplace = this->vec_coarse.size() == 0;
const auto vec_coarse_ptr = use_dst_inplace ? &dst : &this->vec_coarse;
- Assert(vec_coarse_ptr->get_partitioner().get() == partitioner_coarse.get(),
+ Assert(vec_coarse_ptr->get_partitioner().get() ==
+ this->partitioner_coarse.get(),
ExcInternalError());
if (use_src_inplace == false)
this->vec_fine.copy_locally_owned_data_from(src);
- if (fine_element_is_continuous || use_src_inplace == false)
- update_ghost_values(*vec_fine_ptr);
+ if (this->fine_element_is_continuous || use_src_inplace == false)
+ this->update_ghost_values(*vec_fine_ptr);
*vec_coarse_ptr = 0.0;
AlignedVector<VectorizedArrayType> evaluation_data_fine;
AlignedVector<VectorizedArrayType> evaluation_data_coarse;
- const unsigned int *indices_fine = level_dof_indices_fine.data();
+ const unsigned int *indices_fine = this->level_dof_indices_fine.data();
unsigned int cell_counter = 0;
// clean up related to update_ghost_values()
if (use_src_inplace == false)
vec_fine_ptr->set_ghost_state(false); // internal vector
- else if (fine_element_is_continuous)
- zero_out_ghost_values(*vec_fine_ptr); // external vector
+ else if (this->fine_element_is_continuous)
+ this->zero_out_ghost_values(*vec_fine_ptr); // external vector
if (use_dst_inplace == false)
dst.copy_locally_owned_data_from(this->vec_coarse);
}
+namespace internal
+{
+ namespace
+ {
+ bool
+ is_partitioner_contained(
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &external_partitioner)
+ {
+ // no external partitioner has been given
+ if (external_partitioner.get() == nullptr)
+ return false;
+
+ // check if locally owned ranges are the same
+ if (external_partitioner->size() != partitioner->size())
+ return false;
+
+ if (external_partitioner->locally_owned_range() !=
+ partitioner->locally_owned_range())
+ return false;
+
+ const int ghosts_locally_contained =
+ ((external_partitioner->ghost_indices() &
+ partitioner->ghost_indices()) == partitioner->ghost_indices()) ?
+ 1 :
+ 0;
+ // check if ghost values are contained in external partitioner
+ return Utilities::MPI::min(ghosts_locally_contained,
+ partitioner->get_mpi_communicator()) == 1;
+ }
+
+ std::shared_ptr<Utilities::MPI::Partitioner>
+ create_embedded_partitioner(
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &larger_partitioner)
+ {
+ auto embedded_partitioner = std::make_shared<Utilities::MPI::Partitioner>(
+ larger_partitioner->locally_owned_range(),
+ larger_partitioner->get_mpi_communicator());
+
+ embedded_partitioner->set_ghost_indices(
+ partitioner->ghost_indices(), larger_partitioner->ghost_indices());
+
+ return embedded_partitioner;
+ }
+ } // namespace
+} // namespace internal
+
-template <int dim, typename Number>
+template <typename Number>
+template <typename ConstraintInfo>
void
-MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
- enable_inplace_operations_if_possible(
+MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::
+ internal_enable_inplace_operations_if_possible(
const std::shared_ptr<const Utilities::MPI::Partitioner>
&external_partitioner_coarse,
const std::shared_ptr<const Utilities::MPI::Partitioner>
- &external_partitioner_fine)
+ & external_partitioner_fine,
+ ConstraintInfo &constraint_info)
{
- const auto is_partitioner_contained =
- [](const auto &partitioner, const auto &external_partitioner) -> bool {
- // no external partitioner has been given
- if (external_partitioner.get() == nullptr)
- return false;
-
- // check if locally owned ranges are the same
- if (external_partitioner->size() != partitioner->size())
- return false;
-
- if (external_partitioner->locally_owned_range() !=
- partitioner->locally_owned_range())
- return false;
-
- const int ghosts_locally_contained =
- ((external_partitioner->ghost_indices() & partitioner->ghost_indices()) ==
- partitioner->ghost_indices()) ?
- 1 :
- 0;
-
- // check if ghost values are contained in external partititioner
- return Utilities::MPI::min(ghosts_locally_contained,
- partitioner->get_mpi_communicator()) == 1;
- };
-
- const auto create_embedded_partitioner = [&](const auto &partitioner,
- const auto &larger_partitioner) {
- auto embedded_partitioner = std::make_shared<Utilities::MPI::Partitioner>(
- larger_partitioner->locally_owned_range(),
- larger_partitioner->get_mpi_communicator());
-
- embedded_partitioner->set_ghost_indices(
- partitioner->ghost_indices(), larger_partitioner->ghost_indices());
-
- return embedded_partitioner;
- };
-
if (this->partitioner_coarse->is_globally_compatible(
*external_partitioner_coarse))
{
this->vec_coarse.reinit(0);
this->partitioner_coarse = external_partitioner_coarse;
}
- else if (is_partitioner_contained(this->partitioner_coarse,
- external_partitioner_coarse))
+ else if (internal::is_partitioner_contained(this->partitioner_coarse,
+ external_partitioner_coarse))
{
this->vec_coarse.reinit(0);
this->partitioner_coarse->local_to_global(i));
this->partitioner_coarse_embedded =
- create_embedded_partitioner(this->partitioner_coarse,
- external_partitioner_coarse);
+ internal::create_embedded_partitioner(this->partitioner_coarse,
+ external_partitioner_coarse);
this->partitioner_coarse = external_partitioner_coarse;
}
this->vec_fine.reinit(0);
this->partitioner_fine = external_partitioner_fine;
}
- else if (is_partitioner_contained(this->partitioner_fine,
- external_partitioner_fine))
+ else if (internal::is_partitioner_contained(this->partitioner_fine,
+ external_partitioner_fine))
{
this->vec_fine.reinit(0);
- for (auto &i : level_dof_indices_fine)
+ for (auto &i : this->level_dof_indices_fine)
i = external_partitioner_fine->global_to_local(
this->partitioner_fine->local_to_global(i));
this->partitioner_fine_embedded =
- create_embedded_partitioner(this->partitioner_fine,
- external_partitioner_fine);
+ internal::create_embedded_partitioner(this->partitioner_fine,
+ external_partitioner_fine);
this->partitioner_fine = external_partitioner_fine;
}
}
+template <int dim, typename Number>
+void
+MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+ enable_inplace_operations_if_possible(
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &external_partitioner_coarse,
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &external_partitioner_fine)
+{
+ this->internal_enable_inplace_operations_if_possible(
+ external_partitioner_coarse, external_partitioner_fine, constraint_info);
+}
+
template <int dim, typename Number>
size += scheme.restriction_matrix_1d.memory_consumption();
}
- size += partitioner_fine->memory_consumption();
- size += partitioner_coarse->memory_consumption();
- size += vec_fine.memory_consumption();
- size += vec_coarse.memory_consumption();
+ size += this->partitioner_fine->memory_consumption();
+ size += this->partitioner_coarse->memory_consumption();
+ size += this->vec_fine.memory_consumption();
+ size += this->vec_coarse.memory_consumption();
size += MemoryConsumption::memory_consumption(weights);
- size += MemoryConsumption::memory_consumption(level_dof_indices_fine);
+ size += MemoryConsumption::memory_consumption(this->level_dof_indices_fine);
size += constraint_info.memory_consumption();
return size;
-template <int dim, typename Number>
+template <typename Number>
void
-MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::
update_ghost_values(
const LinearAlgebra::distributed::Vector<Number> &vec) const
{
- if ((vec.get_partitioner().get() == partitioner_coarse.get()) &&
- (partitioner_coarse_embedded != nullptr))
- internal::SimpleVectorDataExchange<Number>(partitioner_coarse_embedded,
- buffer_coarse_embedded)
+ if ((vec.get_partitioner().get() == this->partitioner_coarse.get()) &&
+ (this->partitioner_coarse_embedded != nullptr))
+ internal::SimpleVectorDataExchange<Number>(
+ this->partitioner_coarse_embedded, this->buffer_coarse_embedded)
.update_ghost_values(vec);
- else if ((vec.get_partitioner().get() == partitioner_fine.get()) &&
- (partitioner_fine_embedded != nullptr))
- internal::SimpleVectorDataExchange<Number>(partitioner_fine_embedded,
- buffer_fine_embedded)
+ else if ((vec.get_partitioner().get() == this->partitioner_fine.get()) &&
+ (this->partitioner_fine_embedded != nullptr))
+ internal::SimpleVectorDataExchange<Number>(this->partitioner_fine_embedded,
+ this->buffer_fine_embedded)
.update_ghost_values(vec);
else
vec.update_ghost_values();
-template <int dim, typename Number>
+template <typename Number>
void
-MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::compress(
+MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::compress(
LinearAlgebra::distributed::Vector<Number> &vec,
const VectorOperation::values op) const
{
Assert(op == VectorOperation::add, ExcNotImplemented());
- if ((vec.get_partitioner().get() == partitioner_coarse.get()) &&
- (partitioner_coarse_embedded != nullptr))
- internal::SimpleVectorDataExchange<Number>(partitioner_coarse_embedded,
- buffer_coarse_embedded)
+ if ((vec.get_partitioner().get() == this->partitioner_coarse.get()) &&
+ (this->partitioner_coarse_embedded != nullptr))
+ internal::SimpleVectorDataExchange<Number>(
+ this->partitioner_coarse_embedded, this->buffer_coarse_embedded)
.compress(vec);
- else if ((vec.get_partitioner().get() == partitioner_fine.get()) &&
- (partitioner_fine_embedded != nullptr))
- internal::SimpleVectorDataExchange<Number>(partitioner_fine_embedded,
- buffer_fine_embedded)
+ else if ((vec.get_partitioner().get() == this->partitioner_fine.get()) &&
+ (this->partitioner_fine_embedded != nullptr))
+ internal::SimpleVectorDataExchange<Number>(this->partitioner_fine_embedded,
+ this->buffer_fine_embedded)
.compress(vec);
else
vec.compress(op);
-template <int dim, typename Number>
+template <typename Number>
void
-MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
+MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::
zero_out_ghost_values(
const LinearAlgebra::distributed::Vector<Number> &vec) const
{
- if ((vec.get_partitioner().get() == partitioner_coarse.get()) &&
- (partitioner_coarse_embedded != nullptr))
- internal::SimpleVectorDataExchange<Number>(partitioner_coarse_embedded,
- buffer_coarse_embedded)
+ if ((vec.get_partitioner().get() == this->partitioner_coarse.get()) &&
+ (this->partitioner_coarse_embedded != nullptr))
+ internal::SimpleVectorDataExchange<Number>(
+ this->partitioner_coarse_embedded, this->buffer_coarse_embedded)
.zero_out_ghost_values(vec);
- else if ((vec.get_partitioner().get() == (partitioner_fine.get()) &&
- partitioner_fine_embedded != nullptr))
- internal::SimpleVectorDataExchange<Number>(partitioner_fine_embedded,
- buffer_fine_embedded)
+ else if ((vec.get_partitioner().get() == this->partitioner_fine.get()) &&
+ (this->partitioner_fine_embedded != nullptr))
+ internal::SimpleVectorDataExchange<Number>(this->partitioner_fine_embedded,
+ this->buffer_fine_embedded)
.zero_out_ghost_values(vec);
else
vec.zero_out_ghost_values();