From: Marco Feder Date: Thu, 27 Apr 2023 09:29:23 +0000 (+0200) Subject: Introduce MGTwoLevelTransferBase class X-Git-Tag: v9.5.0-rc1~280^2~2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=23b32f4bdc7d09565abac8da99de5a2684e7b8fd;p=dealii.git Introduce MGTwoLevelTransferBase class Co-authored-by: peterrum --- diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.h index 352080978b..d83ca0bafd 100644 --- a/include/deal.II/multigrid/mg_transfer_global_coarsening.h +++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.h @@ -17,6 +17,7 @@ #define dealii_mg_transfer_global_coarsening_h #include +#include #include #include @@ -153,26 +154,222 @@ namespace MGTransferGlobalCoarseningTools +template +class MGTwoLevelTransferBase : public Subscriptor +{ +public: + /** + * Perform prolongation. + */ + virtual void + prolongate_and_add(VectorType &dst, const VectorType &src) const = 0; + + /** + * Perform restriction. + */ + virtual void + restrict_and_add(VectorType &dst, const VectorType &src) const = 0; + + /** + * Perform interpolation of a solution vector from the fine level to the + * coarse level. This function is different from restriction, where a + * weighted residual is transferred to a coarser level (transposition of + * prolongation matrix). + */ + virtual void + interpolate(VectorType &dst, const VectorType &src) const = 0; + + /** + * Enable inplace vector operations if external and internal vectors + * are compatible. + */ + virtual void + enable_inplace_operations_if_possible( + const std::shared_ptr + &partitioner_coarse, + const std::shared_ptr + &partitioner_fine) = 0; + + /** + * Return the memory consumption of the allocated memory in this class. + */ + virtual std::size_t + memory_consumption() const = 0; +}; + + + +template +class MGTwoLevelTransferBase> + : public Subscriptor +{ +public: + using VectorType = LinearAlgebra::distributed::Vector; + + /** + * Perform prolongation. + */ + virtual void + prolongate_and_add(VectorType &dst, const VectorType &src) const; + + /** + * Perform restriction. + */ + virtual void + restrict_and_add(VectorType &dst, const VectorType &src) const; + + /** + * Perform interpolation of a solution vector from the fine level to the + * coarse level. This function is different from restriction, where a + * weighted residual is transferred to a coarser level (transposition of + * prolongation matrix). + */ + virtual void + interpolate(VectorType &dst, const VectorType &src) const = 0; + + /** + * Enable inplace vector operations if external and internal vectors + * are compatible. + */ + virtual void + enable_inplace_operations_if_possible( + const std::shared_ptr + &partitioner_coarse, + const std::shared_ptr + &partitioner_fine) = 0; + + /** + * Return the memory consumption of the allocated memory in this class. + */ + virtual std::size_t + memory_consumption() const = 0; + +protected: + /** + * Perform prolongation. + */ + virtual void + prolongate_and_add_internal( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const = 0; + + /** + * Perform restriction. 
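+   * The restrict_and_add() implementation of this base class takes care of
+   * copying between external and internal vectors as well as of the
+   * ghost-value exchange and compression; derived classes only implement
+   * the cell-local part of the restriction in this hook.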
+ */ + virtual void + restrict_and_add_internal( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const = 0; + + void + update_ghost_values( + const LinearAlgebra::distributed::Vector &vec) const; + + void + compress(LinearAlgebra::distributed::Vector &vec, + const VectorOperation::values op) const; + + void + zero_out_ghost_values( + const LinearAlgebra::distributed::Vector &vec) const; + + /** + * Enable inplace vector operations if external and internal vectors + * are compatible. + */ + template + void + internal_enable_inplace_operations_if_possible( + const std::shared_ptr + &partitioner_coarse, + const std::shared_ptr &partitioner_fine, + ConstraintInfo & constraint_info); + + /** + * Flag if the finite elements on the fine cells are continuous. If yes, + * the multiplicity of DoF sharing a vertex/line as well as constraints have + * to be taken into account via weights. + */ + bool fine_element_is_continuous; + + /** + * Partitioner needed by the intermediate vector. + */ + std::shared_ptr partitioner_coarse; + + /** + * Partitioner needed by the intermediate vector. + */ + std::shared_ptr partitioner_fine; + + /** + * Internal vector needed for collecting all degrees of freedom of the fine + * cells. It is only initialized if the fine-level DoF indices touch DoFs + * other than the locally active ones (which we always assume can be + * accessed by the given vectors in the prolongate/restrict functions), + * otherwise it is left at size zero. + */ + mutable LinearAlgebra::distributed::Vector vec_fine; + + /** + * Internal vector on that the actual prolongation/restriction is performed. + */ + mutable LinearAlgebra::distributed::Vector vec_coarse; + + /** + * Embedded partitioner for efficient communication if locally relevant DoFs + * are a subset of an external Partitioner object. + */ + std::shared_ptr + partitioner_coarse_embedded; + + /** + * Embedded partitioner for efficient communication if locally relevant DoFs + * are a subset of an external Partitioner object. + */ + std::shared_ptr partitioner_fine_embedded; + + /** + * Buffer for efficient communication if locally relevant DoFs + * are a subset of an external Partitioner object. + */ + mutable AlignedVector buffer_coarse_embedded; + + /** + * Buffer for efficient communication if locally relevant DoFs + * are a subset of an external Partitioner object. + */ + mutable AlignedVector buffer_fine_embedded; + + /** + * DoF indices of the fine cells, expressed in indices local to the MPI + * rank. + */ + std::vector level_dof_indices_fine; +}; + + + /** * Class for transfer between two multigrid levels for p- or global coarsening. * * The implementation of this class is explained in detail in @cite munch2022gc. */ template -class MGTwoLevelTransfer +class MGTwoLevelTransfer : public MGTwoLevelTransferBase { public: /** * Perform prolongation. */ void - prolongate_and_add(VectorType &dst, const VectorType &src) const; + prolongate_and_add(VectorType &dst, const VectorType &src) const override; /** * Perform restriction. */ void - restrict_and_add(VectorType &dst, const VectorType &src) const; + restrict_and_add(VectorType &dst, const VectorType &src) const override; /** * Perform interpolation of a solution vector from the fine level to the @@ -181,7 +378,7 @@ public: * prolongation matrix). 
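   * This is used, for example, by
   * MGTransferGlobalCoarsening::interpolate_to_mg() to transfer a fine-mesh
   * solution to all coarser levels.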
*/ void - interpolate(VectorType &dst, const VectorType &src) const; + interpolate(VectorType &dst, const VectorType &src) const override; /** * Enable inplace vector operations if external and internal vectors @@ -191,13 +388,14 @@ public: enable_inplace_operations_if_possible( const std::shared_ptr &partitioner_coarse, - const std::shared_ptr &partitioner_fine); + const std::shared_ptr &partitioner_fine) + override; /** * Return the memory consumption of the allocated memory in this class. */ std::size_t - memory_consumption() const; + memory_consumption() const override; }; @@ -210,6 +408,7 @@ public: */ template class MGTwoLevelTransfer> + : public MGTwoLevelTransferBase> { public: /** @@ -283,21 +482,6 @@ public: fast_polynomial_transfer_supported(const unsigned int fe_degree_fine, const unsigned int fe_degree_coarse); - /** - * Perform prolongation. - */ - void - prolongate_and_add( - LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src) const; - - /** - * Perform restriction. - */ - void - restrict_and_add(LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src) const; - /** * Perform interpolation of a solution vector from the fine level to the * coarse level. This function is different from restriction, where a @@ -305,8 +489,9 @@ public: * prolongation matrix). */ void - interpolate(LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src) const; + interpolate( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const override; /** * Enable inplace vector operations if external and internal vectors @@ -316,26 +501,27 @@ public: enable_inplace_operations_if_possible( const std::shared_ptr &partitioner_coarse, - const std::shared_ptr &partitioner_fine); + const std::shared_ptr &partitioner_fine) + override; /** * Return the memory consumption of the allocated memory in this class. */ std::size_t - memory_consumption() const; - -private: - void - update_ghost_values(const LinearAlgebra::distributed::Vector &) const; + memory_consumption() const override; +protected: void - compress(LinearAlgebra::distributed::Vector &, - const VectorOperation::values) const; + prolongate_and_add_internal( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const override; void - zero_out_ghost_values( - const LinearAlgebra::distributed::Vector &) const; + restrict_and_add_internal( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const override; +private: /** * A multigrid transfer scheme. A multrigrid transfer class can have different * transfer schemes to enable p-adaptivity (one transfer scheme per @@ -411,62 +597,6 @@ private: */ std::vector schemes; - /** - * Flag if the finite elements on the fine cells are continuous. If yes, - * the multiplicity of DoF sharing a vertex/line as well as constraints have - * to be taken into account via weights. - */ - bool fine_element_is_continuous; - - /** - * Partitioner needed by the intermediate vector. - */ - std::shared_ptr partitioner_fine; - - /** - * Embedded partitioner for efficient communication if locally relevant DoFs - * are a subset of an external Partitioner object. - */ - std::shared_ptr partitioner_fine_embedded; - - /** - * Buffer for efficient communication if locally relevant DoFs - * are a subset of an external Partitioner object. 
- */ - mutable AlignedVector buffer_fine_embedded; - - /** - * Partitioner needed by the intermediate vector. - */ - std::shared_ptr partitioner_coarse; - - /** - * Embedded partitioner for efficient communication if locally relevant DoFs - * are a subset of an external Partitioner object. - */ - std::shared_ptr - partitioner_coarse_embedded; - - /** - * Buffer for efficient communication if locally relevant DoFs - * are a subset of an external Partitioner object. - */ - mutable AlignedVector buffer_coarse_embedded; - - /** - * Internal vector needed for collecting all degrees of freedom of the fine - * cells. It is only initialized if the fine-level DoF indices touch DoFs - * other than the locally active ones (which we always assume can be - * accessed by the given vectors in the prolongate/restrict functions), - * otherwise it is left at size zero. - */ - mutable LinearAlgebra::distributed::Vector vec_fine; - - /** - * Internal vector on that the actual prolongation/restriction is performed. - */ - mutable LinearAlgebra::distributed::Vector vec_coarse; - /** * Helper class for reading from and writing to global vectors and for * applying constraints. @@ -485,12 +615,6 @@ private: */ AlignedVector> weights_compressed; - /** - * DoF indices of the fine cells, expressed in indices local to the MPI - * rank. - */ - std::vector level_dof_indices_fine; - /** * Number of components. */ @@ -529,8 +653,9 @@ public: * internal level vectors within the function call copy_to_mg() if used in the * context of PreconditionMG. */ + template MGTransferGlobalCoarsening( - const MGLevelObject> &transfer, + const MGLevelObject &transfer, const std::function &initialize_dof_vector = {}); @@ -661,7 +786,7 @@ private: /** * Collection of the two-level transfer operators. */ - MGLevelObject> transfer; + MGLevelObject>> transfer; /** * External partitioners used during initialize_dof_vector(). 
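   * They are forwarded to the two-level transfer operators in build() via
   * enable_inplace_operations_if_possible().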
@@ -711,12 +836,22 @@ private: template +template MGTransferGlobalCoarsening::MGTransferGlobalCoarsening( - const MGLevelObject> &transfer, + const MGLevelObject &transfer, const std::function &initialize_dof_vector) - : transfer(transfer) { + const unsigned int min_level = transfer.min_level(); + const unsigned int max_level = transfer.max_level(); + + this->transfer.resize(min_level, max_level); + + for (unsigned int l = min_level; l <= max_level; ++l) + this->transfer[l] = &const_cast &>( + static_cast &>( + Utilities::get_underlying_value(transfer[l]))); + this->build(initialize_dof_vector); } @@ -738,7 +873,7 @@ MGTransferGlobalCoarsening::build( AssertDimension(this->external_partitioners.size(), transfer.n_levels()); for (unsigned int l = min_level + 1; l <= max_level; ++l) - transfer[l].enable_inplace_operations_if_possible( + transfer[l]->enable_inplace_operations_if_possible( this->external_partitioners[l - 1 - min_level], this->external_partitioners[l - min_level]); } @@ -817,7 +952,7 @@ MGTransferGlobalCoarsening::prolongate_and_add( VectorType & dst, const VectorType & src) const { - this->transfer[to_level].prolongate_and_add(dst, src); + this->transfer[to_level]->prolongate_and_add(dst, src); } @@ -829,7 +964,7 @@ MGTransferGlobalCoarsening::restrict_and_add( VectorType & dst, const VectorType & src) const { - this->transfer[from_level].restrict_and_add(dst, src); + this->transfer[from_level]->restrict_and_add(dst, src); } @@ -891,7 +1026,7 @@ MGTransferGlobalCoarsening::interpolate_to_mg( dst[transfer.max_level()].copy_locally_owned_data_from(src); for (unsigned int l = max_level; l > min_level; --l) - this->transfer[l].interpolate(dst[l - 1], dst[l]); + this->transfer[l]->interpolate(dst[l - 1], dst[l]); } @@ -921,7 +1056,7 @@ MGTransferGlobalCoarsening::memory_consumption() const const unsigned int max_level = transfer.max_level(); for (unsigned int l = min_level + 1; l <= max_level; ++l) - size += this->transfer[l].memory_consumption(); + size += this->transfer[l]->memory_consumption(); return size; } diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h index 5cec4ffa6a..dd8f5648ce 100644 --- a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h +++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -38,6 +39,7 @@ #include #include +#include #include #include @@ -2594,43 +2596,65 @@ namespace MGTransferGlobalCoarseningTools -template +template void -MGTwoLevelTransfer>:: +MGTwoLevelTransferBase>:: prolongate_and_add( LinearAlgebra::distributed::Vector & dst, const LinearAlgebra::distributed::Vector &src) const { - using VectorizedArrayType = VectorizedArray; - - const unsigned int n_lanes = VectorizedArrayType::size(); - const bool use_dst_inplace = this->vec_fine.size() == 0; const auto vec_fine_ptr = use_dst_inplace ? &dst : &this->vec_fine; - Assert(vec_fine_ptr->get_partitioner().get() == partitioner_fine.get(), + Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(), ExcInternalError()); const bool use_src_inplace = this->vec_coarse.size() == 0; const auto vec_coarse_ptr = use_src_inplace ? 
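       // use the caller's vector directly: enable_inplace_operations_if_possible()
       // has shrunk the internal vec_coarse to size zero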
&src : &this->vec_coarse; - Assert(vec_coarse_ptr->get_partitioner().get() == partitioner_coarse.get(), + Assert(vec_coarse_ptr->get_partitioner().get() == + this->partitioner_coarse.get(), ExcInternalError()); if (use_src_inplace == false) - vec_coarse.copy_locally_owned_data_from(src); + this->vec_coarse.copy_locally_owned_data_from(src); - update_ghost_values(*vec_coarse_ptr); + this->update_ghost_values(*vec_coarse_ptr); if (fine_element_is_continuous && (use_dst_inplace == false)) *vec_fine_ptr = Number(0.); + this->prolongate_and_add_internal(*vec_fine_ptr, *vec_coarse_ptr); + + if (fine_element_is_continuous || use_dst_inplace == false) + this->compress(*vec_fine_ptr, VectorOperation::add); + + if (use_dst_inplace == false) + dst += this->vec_fine; + + if (use_src_inplace) + this->zero_out_ghost_values(*vec_coarse_ptr); +} + + + +template +void +MGTwoLevelTransfer>:: + prolongate_and_add_internal( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const +{ + using VectorizedArrayType = VectorizedArray; + + const unsigned int n_lanes = VectorizedArrayType::size(); + AlignedVector evaluation_data_fine; AlignedVector evaluation_data_coarse; - const unsigned int * indices_fine = level_dof_indices_fine.data(); - const Number * weights = nullptr; + const unsigned int *indices_fine = this->level_dof_indices_fine.data(); + const Number * weights = nullptr; const VectorizedArray *weights_compressed = nullptr; - if (fine_element_is_continuous) + if (this->fine_element_is_continuous) { weights = this->weights.data(); weights_compressed = this->weights_compressed.data(); @@ -2668,7 +2692,7 @@ MGTwoLevelTransfer>:: // read from src vector (similar to FEEvaluation::read_dof_values()) internal::VectorReader reader; constraint_info.read_write_operation(reader, - *vec_coarse_ptr, + src, evaluation_data_coarse.data(), cell_counter, n_lanes_filled, @@ -2700,7 +2724,8 @@ MGTwoLevelTransfer>:: // ------------------------------ fine ------------------------------- // weight and write into dst vector - if (fine_element_is_continuous && this->weights_compressed.size() > 0) + if (this->fine_element_is_continuous && + this->weights_compressed.size() > 0) { internal:: weight_fe_q_dofs_by_entity>( @@ -2713,76 +2738,94 @@ MGTwoLevelTransfer>:: for (unsigned int v = 0; v < n_lanes_filled; ++v) { - if (fine_element_is_continuous && + if (this->fine_element_is_continuous && this->weights_compressed.size() == 0) for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) - vec_fine_ptr->local_element(indices_fine[i]) += + dst.local_element(indices_fine[i]) += evaluation_data_fine[i][v] * weights[i]; else for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) - vec_fine_ptr->local_element(indices_fine[i]) += + dst.local_element(indices_fine[i]) += evaluation_data_fine[i][v]; indices_fine += scheme.n_dofs_per_cell_fine; - if (fine_element_is_continuous) + if (this->fine_element_is_continuous) weights += scheme.n_dofs_per_cell_fine; } } } - - if (fine_element_is_continuous || use_dst_inplace == false) - compress(*vec_fine_ptr, VectorOperation::add); - - if (use_dst_inplace == false) - dst += this->vec_fine; - - if (use_src_inplace) - zero_out_ghost_values(*vec_coarse_ptr); } -template +template void -MGTwoLevelTransfer>:: +MGTwoLevelTransferBase>:: restrict_and_add(LinearAlgebra::distributed::Vector & dst, const LinearAlgebra::distributed::Vector &src) const { - using VectorizedArrayType = VectorizedArray; - - const unsigned int n_lanes = 
VectorizedArrayType::size(); - const bool use_src_inplace = this->vec_fine.size() == 0; const auto vec_fine_ptr = use_src_inplace ? &src : &this->vec_fine; - Assert(vec_fine_ptr->get_partitioner().get() == partitioner_fine.get(), + Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(), ExcInternalError()); const bool use_dst_inplace = this->vec_coarse.size() == 0; const auto vec_coarse_ptr = use_dst_inplace ? &dst : &this->vec_coarse; - Assert(vec_coarse_ptr->get_partitioner().get() == partitioner_coarse.get(), + Assert(vec_coarse_ptr->get_partitioner().get() == + this->partitioner_coarse.get(), ExcInternalError()); if (use_src_inplace == false) this->vec_fine.copy_locally_owned_data_from(src); if (fine_element_is_continuous || use_src_inplace == false) - update_ghost_values(*vec_fine_ptr); + this->update_ghost_values(*vec_fine_ptr); if (use_dst_inplace == false) *vec_coarse_ptr = 0.0; - zero_out_ghost_values(*vec_coarse_ptr); // since we might add into the - // ghost values and call compress + this->zero_out_ghost_values( + *vec_coarse_ptr); // since we might add into the + // ghost values and call compress + + this->restrict_and_add_internal(*vec_coarse_ptr, *vec_fine_ptr); + + // clean up related to update_ghost_values() + if (fine_element_is_continuous == false && use_src_inplace == false) + this->zero_out_ghost_values(*vec_fine_ptr); // internal vector (DG) + else if (fine_element_is_continuous && use_src_inplace == false) + vec_fine_ptr->set_ghost_state(false); // internal vector (CG) + else if (fine_element_is_continuous) + this->zero_out_ghost_values(*vec_fine_ptr); // external vector + + this->compress(*vec_coarse_ptr, VectorOperation::add); + + if (use_dst_inplace == false) + dst += this->vec_coarse; +} + + + +template +void +MGTwoLevelTransfer>:: + restrict_and_add_internal( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const +{ + using VectorizedArrayType = VectorizedArray; + + const unsigned int n_lanes = VectorizedArrayType::size(); AlignedVector evaluation_data_fine; AlignedVector evaluation_data_coarse; - const unsigned int * indices_fine = level_dof_indices_fine.data(); - const Number * weights = nullptr; + const unsigned int *indices_fine = this->level_dof_indices_fine.data(); + const Number * weights = nullptr; const VectorizedArray *weights_compressed = nullptr; - if (fine_element_is_continuous) + if (this->fine_element_is_continuous) { weights = this->weights.data(); weights_compressed = this->weights_compressed.data(); @@ -2820,23 +2863,24 @@ MGTwoLevelTransfer>:: // read from source vector and weight for (unsigned int v = 0; v < n_lanes_filled; ++v) { - if (fine_element_is_continuous && + if (this->fine_element_is_continuous && this->weights_compressed.size() == 0) for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) evaluation_data_fine[i][v] = - vec_fine_ptr->local_element(indices_fine[i]) * weights[i]; + src.local_element(indices_fine[i]) * weights[i]; else for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) evaluation_data_fine[i][v] = - vec_fine_ptr->local_element(indices_fine[i]); + src.local_element(indices_fine[i]); indices_fine += scheme.n_dofs_per_cell_fine; - if (fine_element_is_continuous) + if (this->fine_element_is_continuous) weights += scheme.n_dofs_per_cell_fine; } - if (fine_element_is_continuous && this->weights_compressed.size() > 0) + if (this->fine_element_is_continuous && + this->weights_compressed.size() > 0) { internal:: weight_fe_q_dofs_by_entity>( @@ 
-2874,7 +2918,7 @@ MGTwoLevelTransfer>:: constraint_info.apply_hanging_node_constraints( cell_counter, n_lanes_filled, true, evaluation_data_coarse); constraint_info.read_write_operation(writer, - *vec_coarse_ptr, + dst, evaluation_data_coarse.data(), cell_counter, n_lanes_filled, @@ -2884,19 +2928,6 @@ MGTwoLevelTransfer>:: cell_counter += n_lanes_filled; } } - - // clean up related to update_ghost_values() - if (fine_element_is_continuous == false && use_src_inplace == false) - zero_out_ghost_values(*vec_fine_ptr); // internal vector (DG) - else if (fine_element_is_continuous && use_src_inplace == false) - vec_fine_ptr->set_ghost_state(false); // internal vector (CG) - else if (fine_element_is_continuous) - zero_out_ghost_values(*vec_fine_ptr); // external vector - - compress(*vec_coarse_ptr, VectorOperation::add); - - if (use_dst_inplace == false) - dst += this->vec_coarse; } @@ -2913,26 +2944,27 @@ MGTwoLevelTransfer>:: const bool use_src_inplace = this->vec_fine.size() == 0; const auto vec_fine_ptr = use_src_inplace ? &src : &this->vec_fine; - Assert(vec_fine_ptr->get_partitioner().get() == partitioner_fine.get(), + Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(), ExcInternalError()); const bool use_dst_inplace = this->vec_coarse.size() == 0; const auto vec_coarse_ptr = use_dst_inplace ? &dst : &this->vec_coarse; - Assert(vec_coarse_ptr->get_partitioner().get() == partitioner_coarse.get(), + Assert(vec_coarse_ptr->get_partitioner().get() == + this->partitioner_coarse.get(), ExcInternalError()); if (use_src_inplace == false) this->vec_fine.copy_locally_owned_data_from(src); - if (fine_element_is_continuous || use_src_inplace == false) - update_ghost_values(*vec_fine_ptr); + if (this->fine_element_is_continuous || use_src_inplace == false) + this->update_ghost_values(*vec_fine_ptr); *vec_coarse_ptr = 0.0; AlignedVector evaluation_data_fine; AlignedVector evaluation_data_coarse; - const unsigned int *indices_fine = level_dof_indices_fine.data(); + const unsigned int *indices_fine = this->level_dof_indices_fine.data(); unsigned int cell_counter = 0; @@ -3021,69 +3053,84 @@ MGTwoLevelTransfer>:: // clean up related to update_ghost_values() if (use_src_inplace == false) vec_fine_ptr->set_ghost_state(false); // internal vector - else if (fine_element_is_continuous) - zero_out_ghost_values(*vec_fine_ptr); // external vector + else if (this->fine_element_is_continuous) + this->zero_out_ghost_values(*vec_fine_ptr); // external vector if (use_dst_inplace == false) dst.copy_locally_owned_data_from(this->vec_coarse); } +namespace internal +{ + namespace + { + bool + is_partitioner_contained( + const std::shared_ptr &partitioner, + const std::shared_ptr + &external_partitioner) + { + // no external partitioner has been given + if (external_partitioner.get() == nullptr) + return false; + + // check if locally owned ranges are the same + if (external_partitioner->size() != partitioner->size()) + return false; + + if (external_partitioner->locally_owned_range() != + partitioner->locally_owned_range()) + return false; + + const int ghosts_locally_contained = + ((external_partitioner->ghost_indices() & + partitioner->ghost_indices()) == partitioner->ghost_indices()) ? 
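+        // 1 if, on this rank, every ghost index of the transfer's partitioner
+        // is also a ghost index of the external partitioner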
+ 1 : + 0; -template + // check if ghost values are contained in external partititioner + return Utilities::MPI::min(ghosts_locally_contained, + partitioner->get_mpi_communicator()) == 1; + } + + std::shared_ptr + create_embedded_partitioner( + const std::shared_ptr &partitioner, + const std::shared_ptr + &larger_partitioner) + { + auto embedded_partitioner = std::make_shared( + larger_partitioner->locally_owned_range(), + larger_partitioner->get_mpi_communicator()); + + embedded_partitioner->set_ghost_indices( + partitioner->ghost_indices(), larger_partitioner->ghost_indices()); + + return embedded_partitioner; + } + } // namespace +} // namespace internal + +template +template void -MGTwoLevelTransfer>:: - enable_inplace_operations_if_possible( +MGTwoLevelTransferBase>:: + internal_enable_inplace_operations_if_possible( const std::shared_ptr &external_partitioner_coarse, const std::shared_ptr - &external_partitioner_fine) + & external_partitioner_fine, + ConstraintInfo &constraint_info) { - const auto is_partitioner_contained = - [](const auto &partitioner, const auto &external_partitioner) -> bool { - // no external partitioner has been given - if (external_partitioner.get() == nullptr) - return false; - - // check if locally owned ranges are the same - if (external_partitioner->size() != partitioner->size()) - return false; - - if (external_partitioner->locally_owned_range() != - partitioner->locally_owned_range()) - return false; - - const int ghosts_locally_contained = - ((external_partitioner->ghost_indices() & partitioner->ghost_indices()) == - partitioner->ghost_indices()) ? - 1 : - 0; - - // check if ghost values are contained in external partititioner - return Utilities::MPI::min(ghosts_locally_contained, - partitioner->get_mpi_communicator()) == 1; - }; - - const auto create_embedded_partitioner = [&](const auto &partitioner, - const auto &larger_partitioner) { - auto embedded_partitioner = std::make_shared( - larger_partitioner->locally_owned_range(), - larger_partitioner->get_mpi_communicator()); - - embedded_partitioner->set_ghost_indices( - partitioner->ghost_indices(), larger_partitioner->ghost_indices()); - - return embedded_partitioner; - }; - if (this->partitioner_coarse->is_globally_compatible( *external_partitioner_coarse)) { this->vec_coarse.reinit(0); this->partitioner_coarse = external_partitioner_coarse; } - else if (is_partitioner_contained(this->partitioner_coarse, - external_partitioner_coarse)) + else if (internal::is_partitioner_contained(this->partitioner_coarse, + external_partitioner_coarse)) { this->vec_coarse.reinit(0); @@ -3096,8 +3143,8 @@ MGTwoLevelTransfer>:: this->partitioner_coarse->local_to_global(i)); this->partitioner_coarse_embedded = - create_embedded_partitioner(this->partitioner_coarse, - external_partitioner_coarse); + internal::create_embedded_partitioner(this->partitioner_coarse, + external_partitioner_coarse); this->partitioner_coarse = external_partitioner_coarse; } @@ -3108,23 +3155,36 @@ MGTwoLevelTransfer>:: this->vec_fine.reinit(0); this->partitioner_fine = external_partitioner_fine; } - else if (is_partitioner_contained(this->partitioner_fine, - external_partitioner_fine)) + else if (internal::is_partitioner_contained(this->partitioner_fine, + external_partitioner_fine)) { this->vec_fine.reinit(0); - for (auto &i : level_dof_indices_fine) + for (auto &i : this->level_dof_indices_fine) i = external_partitioner_fine->global_to_local( this->partitioner_fine->local_to_global(i)); this->partitioner_fine_embedded = - 
create_embedded_partitioner(this->partitioner_fine, - external_partitioner_fine); + internal::create_embedded_partitioner(this->partitioner_fine, + external_partitioner_fine); this->partitioner_fine = external_partitioner_fine; } } +template +void +MGTwoLevelTransfer>:: + enable_inplace_operations_if_possible( + const std::shared_ptr + &external_partitioner_coarse, + const std::shared_ptr + &external_partitioner_fine) +{ + this->internal_enable_inplace_operations_if_possible( + external_partitioner_coarse, external_partitioner_fine, constraint_info); +} + template @@ -3294,12 +3354,12 @@ MGTwoLevelTransfer>:: size += scheme.restriction_matrix_1d.memory_consumption(); } - size += partitioner_fine->memory_consumption(); - size += partitioner_coarse->memory_consumption(); - size += vec_fine.memory_consumption(); - size += vec_coarse.memory_consumption(); + size += this->partitioner_fine->memory_consumption(); + size += this->partitioner_coarse->memory_consumption(); + size += this->vec_fine.memory_consumption(); + size += this->vec_coarse.memory_consumption(); size += MemoryConsumption::memory_consumption(weights); - size += MemoryConsumption::memory_consumption(level_dof_indices_fine); + size += MemoryConsumption::memory_consumption(this->level_dof_indices_fine); size += constraint_info.memory_consumption(); return size; @@ -3307,21 +3367,21 @@ MGTwoLevelTransfer>:: -template +template void -MGTwoLevelTransfer>:: +MGTwoLevelTransferBase>:: update_ghost_values( const LinearAlgebra::distributed::Vector &vec) const { - if ((vec.get_partitioner().get() == partitioner_coarse.get()) && - (partitioner_coarse_embedded != nullptr)) - internal::SimpleVectorDataExchange(partitioner_coarse_embedded, - buffer_coarse_embedded) + if ((vec.get_partitioner().get() == this->partitioner_coarse.get()) && + (this->partitioner_coarse_embedded != nullptr)) + internal::SimpleVectorDataExchange( + this->partitioner_coarse_embedded, this->buffer_coarse_embedded) .update_ghost_values(vec); - else if ((vec.get_partitioner().get() == partitioner_fine.get()) && - (partitioner_fine_embedded != nullptr)) - internal::SimpleVectorDataExchange(partitioner_fine_embedded, - buffer_fine_embedded) + else if ((vec.get_partitioner().get() == this->partitioner_fine.get()) && + (this->partitioner_fine_embedded != nullptr)) + internal::SimpleVectorDataExchange(this->partitioner_fine_embedded, + this->buffer_fine_embedded) .update_ghost_values(vec); else vec.update_ghost_values(); @@ -3329,23 +3389,23 @@ MGTwoLevelTransfer>:: -template +template void -MGTwoLevelTransfer>::compress( +MGTwoLevelTransferBase>::compress( LinearAlgebra::distributed::Vector &vec, const VectorOperation::values op) const { Assert(op == VectorOperation::add, ExcNotImplemented()); - if ((vec.get_partitioner().get() == partitioner_coarse.get()) && - (partitioner_coarse_embedded != nullptr)) - internal::SimpleVectorDataExchange(partitioner_coarse_embedded, - buffer_coarse_embedded) + if ((vec.get_partitioner().get() == this->partitioner_coarse.get()) && + (this->partitioner_coarse_embedded != nullptr)) + internal::SimpleVectorDataExchange( + this->partitioner_coarse_embedded, this->buffer_coarse_embedded) .compress(vec); - else if ((vec.get_partitioner().get() == partitioner_fine.get()) && - (partitioner_fine_embedded != nullptr)) - internal::SimpleVectorDataExchange(partitioner_fine_embedded, - buffer_fine_embedded) + else if ((vec.get_partitioner().get() == this->partitioner_fine.get()) && + (this->partitioner_fine_embedded != nullptr)) + 
internal::SimpleVectorDataExchange(this->partitioner_fine_embedded, + this->buffer_fine_embedded) .compress(vec); else vec.compress(op); @@ -3353,21 +3413,21 @@ MGTwoLevelTransfer>::compress( -template +template void -MGTwoLevelTransfer>:: +MGTwoLevelTransferBase>:: zero_out_ghost_values( const LinearAlgebra::distributed::Vector &vec) const { - if ((vec.get_partitioner().get() == partitioner_coarse.get()) && - (partitioner_coarse_embedded != nullptr)) - internal::SimpleVectorDataExchange(partitioner_coarse_embedded, - buffer_coarse_embedded) + if ((vec.get_partitioner().get() == this->partitioner_coarse.get()) && + (this->partitioner_coarse_embedded != nullptr)) + internal::SimpleVectorDataExchange( + this->partitioner_coarse_embedded, this->buffer_coarse_embedded) .zero_out_ghost_values(vec); - else if ((vec.get_partitioner().get() == (partitioner_fine.get()) && - partitioner_fine_embedded != nullptr)) - internal::SimpleVectorDataExchange(partitioner_fine_embedded, - buffer_fine_embedded) + else if ((vec.get_partitioner().get() == (this->partitioner_fine.get()) && + this->partitioner_fine_embedded != nullptr)) + internal::SimpleVectorDataExchange(this->partitioner_fine_embedded, + this->buffer_fine_embedded) .zero_out_ghost_values(vec); else vec.zero_out_ghost_values();
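// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the patch). It
// assumes a level hierarchy with at least two levels, given as
// `dof_handlers[l]` and `constraints[l]` (l = 0 coarsest), and relies on the
// documented MGTwoLevelTransfer::reinit() interface; the function name and
// argument names are placeholders.

#include <deal.II/base/mg_level_object.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/la_parallel_vector.h>

#include <deal.II/multigrid/mg_transfer_global_coarsening.h>

#include <memory>
#include <vector>

using namespace dealii;

template <int dim, typename Number>
void
two_level_transfer_example(
  const std::vector<std::shared_ptr<const DoFHandler<dim>>> &dof_handlers,
  const std::vector<std::shared_ptr<const AffineConstraints<Number>>>
    &constraints)
{
  using VectorType = LinearAlgebra::distributed::Vector<Number>;

  const unsigned int min_level = 0;
  const unsigned int max_level = dof_handlers.size() - 1;

  // One two-level operator per pair of consecutive levels; transfers[l]
  // connects level l-1 (coarse) to level l (fine).
  MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers(min_level,
                                                               max_level);
  for (unsigned int l = min_level; l < max_level; ++l)
    transfers[l + 1].reinit(*dof_handlers[l + 1],
                            *dof_handlers[l],
                            *constraints[l + 1],
                            *constraints[l]);

  // The templated constructor introduced by this patch extracts
  // MGTwoLevelTransferBase pointers from the MGLevelObject and calls build();
  // an initialize_dof_vector callback may be passed as second argument to
  // enable in-place operations on external vectors. Since only pointers are
  // stored, `transfers` has to outlive `transfer`.
  MGTransferGlobalCoarsening<dim, VectorType> transfer(transfers);

  // Prolongate a coarse vector to the finest level and restrict it back;
  // both calls are dispatched through the MGTwoLevelTransferBase interface.
  VectorType fine(dof_handlers[max_level]->locally_owned_dofs(),
                  dof_handlers[max_level]->get_communicator());
  VectorType coarse(dof_handlers[max_level - 1]->locally_owned_dofs(),
                    dof_handlers[max_level - 1]->get_communicator());

  transfer.prolongate_and_add(max_level, fine, coarse);
  transfer.restrict_and_add(max_level, coarse, fine);
}
// ---------------------------------------------------------------------------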