From: Martin Kronbichler
Date: Wed, 18 Sep 2019 20:45:46 +0000 (+0200)
Subject: Allow multigrid transfer to use external vector partitioners
X-Git-Tag: v9.2.0-rc1~883^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f4d3e830f4d7c70259830f27fa00d134034bc128;p=dealii.git

Allow multigrid transfer to use external vector partitioners
---

diff --git a/include/deal.II/multigrid/mg_transfer_internal.h b/include/deal.II/multigrid/mg_transfer_internal.h
index e90e7ca3da..64e310ac23 100644
--- a/include/deal.II/multigrid/mg_transfer_internal.h
+++ b/include/deal.II/multigrid/mg_transfer_internal.h
@@ -120,8 +120,10 @@ namespace internal
     template <int dim, typename Number>
     void
     setup_transfer(
-      const DoFHandler<dim, dim> &mg_dof,
-      const MGConstrainedDoFs *   mg_constrained_dofs,
+      const DoFHandler<dim, dim> &mg_dof,
+      const MGConstrainedDoFs *mg_constrained_dofs,
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        & external_partitioners,
       ElementInfo<Number> &                   elem_info,
       std::vector<std::vector<unsigned int>> &level_dof_indices,
       std::vector<std::vector<std::pair<unsigned int, unsigned int>>>
@@ -130,8 +132,8 @@
       std::vector<std::vector<std::vector<unsigned short>>> &dirichlet_indices,
       std::vector<std::vector<Number>> &   weights_on_refined,
       std::vector<Table<2, unsigned int>> &copy_indices_global_mine,
-      MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
-        &ghosted_level_vector);
+      MGLevelObject<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &vector_partitioners);
   } // namespace MGTransfer
 } // namespace internal
diff --git a/include/deal.II/multigrid/mg_transfer_matrix_free.h b/include/deal.II/multigrid/mg_transfer_matrix_free.h
index 111ca464bf..a3d02ee30c 100644
--- a/include/deal.II/multigrid/mg_transfer_matrix_free.h
+++ b/include/deal.II/multigrid/mg_transfer_matrix_free.h
@@ -89,9 +89,24 @@ public:
   /**
    * Actually build the information for the prolongation for each level.
+   *
+   * The optional second argument allows the user to suggest a vector
+   * partitioning for each level. If the given partitioners contain all
+   * ghost unknowns that are visited through the transfer, they are used
+   * as-is. This makes the vectors used during prolongate() and restrict()
+   * compatible with the external partitioners given by the user, which in
+   * turn saves some copy operations. If, however, some unknowns are
+   * missing -- which is typically the case at some point during
+   * h-coarsening, because processors drop out and the unknowns of child
+   * cells on one processor are then needed as ghosts for a parent cell on
+   * another processor -- the provided external partitioners are ignored
+   * and internally created ones are used instead.
    */
   void
-  build(const DoFHandler<dim, dim> &mg_dof);
+  build(const DoFHandler<dim, dim> &mg_dof,
+        const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+          &external_partitioners =
+            std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>());

   /**
    * Prolongate a vector from level to_level-1 to level
@@ -245,6 +260,15 @@ private:
    */
   std::vector<std::vector<std::vector<unsigned short>>> dirichlet_indices;

+  /**
+   * A vector that holds shared pointers to the partitioners of the
+   * transfer. These partitioners are either shared with the partitioners
+   * passed in from the outside through build() or with the level vectors
+   * inherited from MGLevelGlobalTransfer.
+   */
+  MGLevelObject<std::shared_ptr<const Utilities::MPI::Partitioner>>
+    vector_partitioners;
+
   /**
    * Perform the prolongation operation.
   */
diff --git a/source/multigrid/mg_level_global_transfer.cc b/source/multigrid/mg_level_global_transfer.cc
index 633b4ff980..964f1a9d13 100644
--- a/source/multigrid/mg_level_global_transfer.cc
+++ b/source/multigrid/mg_level_global_transfer.cc
@@ -300,15 +300,39 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
                 ghosted_global_vector,
                 ghosted_level_vector);

-  fill_internal(mg_dof,
-                mg_constrained_dofs,
-                mpi_communicator,
-                true,
-                this->solution_copy_indices,
-                this->solution_copy_indices_global_mine,
-                this->solution_copy_indices_level_mine,
-                solution_ghosted_global_vector,
-                solution_ghosted_level_vector);
+  // In case we have hanging nodes, which imply different ownership of the
+  // global and level dof indices, we must also fill the solution indices,
+  // which contain additional entries; otherwise, they can re-use the
+  // indices of the standard transfer.
+  int have_refinement_edge_dofs = 0;
+  if (mg_constrained_dofs != nullptr)
+    for (unsigned int level = 0;
+         level < mg_dof.get_triangulation().n_global_levels();
+         ++level)
+      if (mg_constrained_dofs->get_refinement_edge_indices(level).n_elements() >
+          0)
+        {
+          have_refinement_edge_dofs = 1;
+          break;
+        }
+  if (Utilities::MPI::max(have_refinement_edge_dofs, mpi_communicator) == 1)
+    fill_internal(mg_dof,
+                  mg_constrained_dofs,
+                  mpi_communicator,
+                  true,
+                  this->solution_copy_indices,
+                  this->solution_copy_indices_global_mine,
+                  this->solution_copy_indices_level_mine,
+                  solution_ghosted_global_vector,
+                  solution_ghosted_level_vector);
+  else
+    {
+      this->solution_copy_indices             = this->copy_indices;
+      this->solution_copy_indices_global_mine = this->copy_indices_global_mine;
+      this->solution_copy_indices_level_mine  = this->copy_indices_level_mine;
+      solution_ghosted_global_vector          = ghosted_global_vector;
+      solution_ghosted_level_vector           = ghosted_level_vector;
+    }

   bool my_perform_renumbered_plain_copy =
     (this->copy_indices.back().n_cols() ==
diff --git a/source/multigrid/mg_transfer_internal.cc b/source/multigrid/mg_transfer_internal.cc
index a11aa5b66c..57135a5699 100644
--- a/source/multigrid/mg_transfer_internal.cc
+++ b/source/multigrid/mg_transfer_internal.cc
@@ -222,7 +222,8 @@ namespace internal
               "We should only be sending information with a parallel Triangulation!"));

 #ifdef DEAL_II_WITH_MPI
-      if (tria)
+      if (tria && Utilities::MPI::sum(send_data_temp.size(),
+                                      tria->get_communicator()) > 0)
         {
           const std::set<types::subdomain_id> &neighbors =
             tria->level_ghost_owners();
@@ -419,14 +420,15 @@ namespace internal

     // initialize the vectors needed for the transfer (and merge with the
     // content in copy_indices_global_mine)
-    template <typename Number>
     void
-    reinit_ghosted_vector(
-      const IndexSet &                            locally_owned,
-      std::vector<types::global_dof_index> &      ghosted_level_dofs,
-      const MPI_Comm &                            communicator,
-      LinearAlgebra::distributed::Vector<Number> &ghosted_level_vector,
-      Table<2, unsigned int> &                    copy_indices_global_mine)
+    reinit_level_partitioner(
+      const IndexSet &                      locally_owned,
+      std::vector<types::global_dof_index> &ghosted_level_dofs,
+      const std::shared_ptr<const Utilities::MPI::Partitioner>
+        & external_partitioner,
+      const MPI_Comm &communicator,
+      std::shared_ptr<const Utilities::MPI::Partitioner> &target_partitioner,
+      Table<2, unsigned int> &copy_indices_global_mine)
     {
       std::sort(ghosted_level_dofs.begin(), ghosted_level_dofs.end());
       IndexSet ghosted_dofs(locally_owned.size());
@@ -436,21 +438,52 @@ namespace internal
       ghosted_dofs.compress();

       // Add possible ghosts from the previous content in the vector
-      if (ghosted_level_vector.size() == locally_owned.size())
+      if (target_partitioner.get() != nullptr &&
+          target_partitioner->size() == locally_owned.size())
+        {
+          ghosted_dofs.add_indices(target_partitioner->ghost_indices());
+        }
+
+      // check if the given partitioner's ghosts represent a superset of the
+      // ghosts we require in this function
+      const int ghosts_locally_contained =
+        (external_partitioner.get() != nullptr &&
+         (external_partitioner->ghost_indices() & ghosted_dofs) ==
+           ghosted_dofs) ?
+          1 :
+          0;
+      if (external_partitioner.get() != nullptr &&
+          Utilities::MPI::min(ghosts_locally_contained, communicator) == 1)
         {
           // shift the local number of the copy indices according to the new
-          // partitioner that we are going to use for the vector
-          const auto &part = ghosted_level_vector.get_partitioner();
-          ghosted_dofs.add_indices(part->ghost_indices());
-          for (unsigned int i = 0; i < copy_indices_global_mine.n_cols(); ++i)
-            copy_indices_global_mine(1, i) =
-              locally_owned.n_elements() +
-              ghosted_dofs.index_within_set(
-                part->local_to_global(copy_indices_global_mine(1, i)));
+          // partitioner that we are going to use during the access to the
+          // entries
+          if (target_partitioner.get() != nullptr &&
+              target_partitioner->size() == locally_owned.size())
+            for (unsigned int i = 0; i < copy_indices_global_mine.n_cols(); ++i)
+              copy_indices_global_mine(1, i) =
+                external_partitioner->global_to_local(
+                  target_partitioner->local_to_global(
+                    copy_indices_global_mine(1, i)));
+          target_partitioner = external_partitioner;
+        }
+      else
+        {
+          if (target_partitioner.get() != nullptr &&
+              target_partitioner->size() == locally_owned.size())
+            for (unsigned int i = 0; i < copy_indices_global_mine.n_cols(); ++i)
+              copy_indices_global_mine(1, i) =
+                locally_owned.n_elements() +
+                ghosted_dofs.index_within_set(
+                  target_partitioner->local_to_global(
+                    copy_indices_global_mine(1, i)));
+          target_partitioner.reset(new Utilities::MPI::Partitioner(
+            locally_owned, ghosted_dofs, communicator));
         }
-      ghosted_level_vector.reinit(locally_owned, ghosted_dofs, communicator);
     }

+
+
     // Transform the ghost indices to local index space for the vector
     inline void
     copy_indices_to_mpi_local_numbers(
@@ -470,6 +503,8 @@ namespace internal
           localized_indices[i + mine.size()] = part.global_to_local(remote[i]);
     }

+
+
     // given the collection of child cells in lexicographic ordering as seen
     // from the parent, compute the first index of the given child
     template <int dim>
@@ -498,6 +533,8 @@ namespace internal
       return shift;
     }

+
+
     // puts the indices on the given child cell in lexicographic ordering with
     // respect to the collection of all child cells as seen from the parent
     template <int dim>
@@ -535,6 +572,8 @@ namespace internal
         }
     }

+
+
     template <int dim, typename Number>
     void
     setup_element_info(ElementInfo<Number> &elem_info,
@@ -596,6 +635,8 @@ namespace internal
             fe.get_prolongation_matrix(c)(renumbering[j], renumbering[i]);
     }

+
+
     namespace
     {
       /**
@@ -627,13 +668,17 @@ namespace internal
       }
     } // namespace

+
+
     // Sets up most of the internal data structures of the MGTransferMatrixFree
     // class
     template <int dim, typename Number>
     void
     setup_transfer(
-      const dealii::DoFHandler<dim, dim> &mg_dof,
-      const MGConstrainedDoFs *           mg_constrained_dofs,
+      const dealii::DoFHandler<dim, dim> &mg_dof,
+      const MGConstrainedDoFs *           mg_constrained_dofs,
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        & external_partitioners,
       ElementInfo<Number> &                   elem_info,
       std::vector<std::vector<unsigned int>> &level_dof_indices,
       std::vector<std::vector<std::pair<unsigned int, unsigned int>>>
@@ -642,8 +687,8 @@ namespace internal
       std::vector<std::vector<std::vector<unsigned short>>> &dirichlet_indices,
       std::vector<std::vector<Number>> &   weights_on_refined,
       std::vector<Table<2, unsigned int>> &copy_indices_global_mine,
-      MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
-        &ghosted_level_vector)
+      MGLevelObject<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &target_partitioners)
     {
       level_dof_indices.clear();
       parent_child_connect.clear();
@@ -691,11 +736,10 @@ namespace internal
                                  mg_dof.get_fe().dofs_per_cell);
       dirichlet_indices.resize(n_levels - 1);

-      // We use the vectors stored ghosted_level_vector in the base class for
-      // keeping ghosted transfer indices. To avoid keeping two very similar
-      // vectors, we merge them here.
-      if (ghosted_level_vector.max_level() != n_levels - 1)
-        ghosted_level_vector.resize(0, n_levels - 1);
+      AssertDimension(target_partitioners.max_level(), n_levels - 1);
+      Assert(external_partitioners.empty() ||
+               external_partitioners.size() == n_levels,
+             ExcDimensionMismatch(external_partitioners.size(), n_levels));

       for (unsigned int level = n_levels - 1; level > 0; --level)
         {
@@ -892,38 +936,48 @@ namespace internal
                 i->first += counter;
             }

-          // step 2.7: Initialize the ghosted vector
+          // step 2.7: Initialize the partitioner for the ghosted vector
+          //
+          // We use a vector built on the target partitioner that is also
+          // handed to the base class for keeping the ghosted transfer
+          // indices. To avoid keeping two very similar vectors, we keep
+          // one single ghosted vector that is augmented/filled here.
           const parallel::TriangulationBase<dim, dim> *ptria =
             (dynamic_cast<const parallel::TriangulationBase<dim, dim> *>(
               &tria));
           const MPI_Comm communicator =
             ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;

-          reinit_ghosted_vector(mg_dof.locally_owned_mg_dofs(level),
-                                ghosted_level_dofs,
-                                communicator,
-                                ghosted_level_vector[level],
-                                copy_indices_global_mine[level]);
-
-          copy_indices_to_mpi_local_numbers(
-            *ghosted_level_vector[level].get_partitioner(),
-            global_level_dof_indices,
-            global_level_dof_indices_remote,
-            level_dof_indices[level]);
+          reinit_level_partitioner(mg_dof.locally_owned_mg_dofs(level),
+                                   ghosted_level_dofs,
+                                   external_partitioners.empty() ?
+                                     nullptr :
+                                     external_partitioners[level],
+                                   communicator,
+                                   target_partitioners[level],
+                                   copy_indices_global_mine[level]);
+
+          copy_indices_to_mpi_local_numbers(*target_partitioners[level],
+                                            global_level_dof_indices,
+                                            global_level_dof_indices_remote,
+                                            level_dof_indices[level]);

           // step 2.8: Initialize the ghosted vector for level 0
           if (level == 1)
             {
               for (unsigned int i = 0; i < parent_child_connect[0].size(); ++i)
                 parent_child_connect[0][i] = std::make_pair(i, 0U);

-              reinit_ghosted_vector(mg_dof.locally_owned_mg_dofs(0),
-                                    ghosted_level_dofs_l0,
-                                    communicator,
-                                    ghosted_level_vector[0],
-                                    copy_indices_global_mine[0]);
+              reinit_level_partitioner(mg_dof.locally_owned_mg_dofs(0),
+                                       ghosted_level_dofs_l0,
+                                       external_partitioners.empty() ?
+                                         nullptr :
+                                         external_partitioners[0],
+                                       communicator,
+                                       target_partitioners[0],
+                                       copy_indices_global_mine[0]);

               copy_indices_to_mpi_local_numbers(
-                *ghosted_level_vector[0].get_partitioner(),
+                *target_partitioners[0],
                 global_level_dof_indices_l0,
                 std::vector<types::global_dof_index>(),
                 level_dof_indices[0]);
@@ -940,14 +994,15 @@ namespace internal
       weights_on_refined.resize(n_levels - 1);
       for (unsigned int level = 1; level < n_levels; ++level)
         {
-          ghosted_level_vector[level] = 0;
+          LinearAlgebra::distributed::Vector<Number> touch_count(
+            target_partitioners[level]);
           for (unsigned int c = 0; c < n_owned_level_cells[level - 1]; ++c)
             for (unsigned int j = 0; j < elem_info.n_child_cell_dofs; ++j)
-              ghosted_level_vector[level].local_element(
+              touch_count.local_element(
                 level_dof_indices[level][elem_info.n_child_cell_dofs * c + j]) += Number(1.);
-          ghosted_level_vector[level].compress(VectorOperation::add);
-          ghosted_level_vector[level].update_ghost_values();
+          touch_count.compress(VectorOperation::add);
+          touch_count.update_ghost_values();

           std::vector<unsigned int> degree_to_3(n_child_dofs_1d);
           degree_to_3[0] = 0;
@@ -970,7 +1025,7 @@ namespace internal
                        1][c * Utilities::fixed_power<dim>(3) + shift + degree_to_3[i]] =
                   Number(1.) /
-                  ghosted_level_vector[level].local_element(
+                  touch_count.local_element(
                     level_dof_indices[level]
                                      [elem_info.n_child_cell_dofs * c + m]);
             }
diff --git a/source/multigrid/mg_transfer_internal.inst.in b/source/multigrid/mg_transfer_internal.inst.in
index 204503303a..0c8841afbf 100644
--- a/source/multigrid/mg_transfer_internal.inst.in
+++ b/source/multigrid/mg_transfer_internal.inst.in
@@ -79,6 +79,8 @@ for (deal_II_dimension : DIMENSIONS; S : REAL_SCALARS)
       setup_transfer<deal_II_dimension, S>(
        const dealii::DoFHandler<deal_II_dimension> &,
        const MGConstrainedDoFs *,
+       const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+         &,
        ElementInfo<S> &,
        std::vector<std::vector<unsigned int>> &,
        std::vector<std::vector<std::pair<unsigned int, unsigned int>>> &,
@@ -86,7 +88,7 @@ for (deal_II_dimension : DIMENSIONS; S : REAL_SCALARS)
        std::vector<std::vector<std::vector<unsigned short>>> &,
        std::vector<std::vector<S>> &,
        std::vector<Table<2, unsigned int>> &,
-       MGLevelObject<LinearAlgebra::distributed::Vector<S>> &);
+       MGLevelObject<std::shared_ptr<const Utilities::MPI::Partitioner>> &);
    \}
  \}
}
diff --git a/source/multigrid/mg_transfer_matrix_free.cc b/source/multigrid/mg_transfer_matrix_free.cc
index 769c532e8c..e7e087e2b1 100644
--- a/source/multigrid/mg_transfer_matrix_free.cc
+++ b/source/multigrid/mg_transfer_matrix_free.cc
@@ -94,10 +94,20 @@ MGTransferMatrixFree<dim, Number>::clear()

 template <int dim, typename Number>
 void
-MGTransferMatrixFree<dim, Number>::build(const DoFHandler<dim, dim> &mg_dof)
+MGTransferMatrixFree<dim, Number>::build(
+  const DoFHandler<dim, dim> &mg_dof,
+  const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+    &external_partitioners)
 {
   this->fill_and_communicate_copy_indices(mg_dof);

+  vector_partitioners.resize(0,
+                             mg_dof.get_triangulation().n_global_levels() - 1);
+  for (unsigned int level = 0; level <= this->ghosted_level_vector.max_level();
+       ++level)
+    vector_partitioners[level] =
+      this->ghosted_level_vector[level].get_partitioner();
+
   std::vector<std::vector<Number>> weights_unvectorized;

   internal::MGTransfer::ElementInfo<Number> elem_info;
@@ -105,6 +115,7 @@ MGTransferMatrixFree<dim, Number>::build(const DoFHandler<dim, dim> &mg_dof)
   internal::MGTransfer::setup_transfer<dim, Number>(
     mg_dof,
     this->mg_constrained_dofs,
+    external_partitioners,
     elem_info,
     level_dof_indices,
     parent_child_connect,
@@ -112,7 +123,19 @@ MGTransferMatrixFree<dim, Number>::build(const DoFHandler<dim, dim> &mg_dof)
     dirichlet_indices,
     weights_unvectorized,
     this->copy_indices_global_mine,
-    this->ghosted_level_vector);
+    vector_partitioners);
+
+  // reinit the ghosted level vector in case its partitioner differs from the
+  // one the user intends to use externally
+  this->ghosted_level_vector.resize(0, vector_partitioners.max_level());
+  for (unsigned int level = 0; level <= vector_partitioners.max_level();
+       ++level)
+    if (external_partitioners.size() == vector_partitioners.max_level() + 1 &&
+        external_partitioners[level].get() == vector_partitioners[level].get())
+      this->ghosted_level_vector[level].reinit(0);
+    else
+      this->ghosted_level_vector[level].reinit(vector_partitioners[level]);
+
   // unpack element info data
   fe_degree             = elem_info.fe_degree;
   element_is_continuous = elem_info.element_is_continuous;
@@ -163,69 +186,73 @@ MGTransferMatrixFree<dim, Number>::prolongate(
   Assert((to_level >= 1) && (to_level <= level_dof_indices.size()),
          ExcIndexRange(to_level, 1, level_dof_indices.size() + 1));

-  AssertDimension(this->ghosted_level_vector[to_level].local_size(),
-                  dst.local_size());
-  AssertDimension(this->ghosted_level_vector[to_level - 1].local_size(),
-                  src.local_size());
+  const bool src_inplace = src.get_partitioner().get() ==
+                           this->vector_partitioners[to_level - 1].get();
+  if (src_inplace == false)
+    {
+      if (this->ghosted_level_vector[to_level - 1].get_partitioner().get() !=
+          this->vector_partitioners[to_level - 1].get())
+        this->ghosted_level_vector[to_level - 1].reinit(
+          this->vector_partitioners[to_level - 1]);
+      this->ghosted_level_vector[to_level - 1].copy_locally_owned_data_from(
+        src);
+    }
+
+  const bool dst_inplace =
+    dst.get_partitioner().get() == this->vector_partitioners[to_level].get();
+  if (dst_inplace == false)
+    {
+      if (this->ghosted_level_vector[to_level].get_partitioner().get() !=
+          this->vector_partitioners[to_level].get())
+        this->ghosted_level_vector[to_level].reinit(
+          this->vector_partitioners[to_level]);
+      AssertDimension(this->ghosted_level_vector[to_level].local_size(),
+                      dst.local_size());
+      this->ghosted_level_vector[to_level] = 0.;
+    }
+  else
+    dst = 0;

-  this->ghosted_level_vector[to_level - 1].copy_locally_owned_data_from(src);
-  this->ghosted_level_vector[to_level - 1].update_ghost_values();
-  this->ghosted_level_vector[to_level] = 0.;
+  const LinearAlgebra::distributed::Vector<Number> &src_vec =
+    src_inplace ? src : this->ghosted_level_vector[to_level - 1];
+  LinearAlgebra::distributed::Vector<Number> &dst_vec =
+    dst_inplace ? dst : this->ghosted_level_vector[to_level];
+
+  src_vec.update_ghost_values();

   // the implementation in do_prolongate_add is templated in the degree of the
   // element (for efficiency reasons), so we need to find the appropriate
   // kernel here...
   if (fe_degree == 0)
-    do_prolongate_add<0>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<0>(to_level, dst_vec, src_vec);
   else if (fe_degree == 1)
-    do_prolongate_add<1>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<1>(to_level, dst_vec, src_vec);
   else if (fe_degree == 2)
-    do_prolongate_add<2>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<2>(to_level, dst_vec, src_vec);
   else if (fe_degree == 3)
-    do_prolongate_add<3>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<3>(to_level, dst_vec, src_vec);
   else if (fe_degree == 4)
-    do_prolongate_add<4>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<4>(to_level, dst_vec, src_vec);
   else if (fe_degree == 5)
-    do_prolongate_add<5>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<5>(to_level, dst_vec, src_vec);
   else if (fe_degree == 6)
-    do_prolongate_add<6>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<6>(to_level, dst_vec, src_vec);
   else if (fe_degree == 7)
-    do_prolongate_add<7>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<7>(to_level, dst_vec, src_vec);
   else if (fe_degree == 8)
-    do_prolongate_add<8>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<8>(to_level, dst_vec, src_vec);
   else if (fe_degree == 9)
-    do_prolongate_add<9>(to_level,
-                         this->ghosted_level_vector[to_level],
-                         this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<9>(to_level, dst_vec, src_vec);
   else if (fe_degree == 10)
-    do_prolongate_add<10>(to_level,
-                          this->ghosted_level_vector[to_level],
-                          this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<10>(to_level, dst_vec, src_vec);
   else
-    do_prolongate_add<-1>(to_level,
-                          this->ghosted_level_vector[to_level],
-                          this->ghosted_level_vector[to_level - 1]);
+    do_prolongate_add<-1>(to_level, dst_vec, src_vec);

-  this->ghosted_level_vector[to_level].compress(VectorOperation::add);
-  dst.copy_locally_owned_data_from(this->ghosted_level_vector[to_level]);
+  dst_vec.compress(VectorOperation::add);
+  if (dst_inplace == false)
+    dst = dst_vec;
+
+  if (src_inplace == true)
+    src.zero_out_ghosts();
 }
@@ -240,67 +267,69 @@ MGTransferMatrixFree<dim, Number>::restrict_and_add(
   Assert((from_level >= 1) && (from_level <= level_dof_indices.size()),
          ExcIndexRange(from_level, 1, level_dof_indices.size() + 1));

-  AssertDimension(this->ghosted_level_vector[from_level].local_size(),
-                  src.local_size());
-  AssertDimension(this->ghosted_level_vector[from_level - 1].local_size(),
-                  dst.local_size());
+  const bool src_inplace =
+    src.get_partitioner().get() == this->vector_partitioners[from_level].get();
+  if (src_inplace == false)
+    {
+      if (this->ghosted_level_vector[from_level].get_partitioner().get() !=
+          this->vector_partitioners[from_level].get())
+        this->ghosted_level_vector[from_level].reinit(
+          this->vector_partitioners[from_level]);
+      this->ghosted_level_vector[from_level].copy_locally_owned_data_from(src);
+    }
+
+  const bool dst_inplace = dst.get_partitioner().get() ==
+                           this->vector_partitioners[from_level - 1].get();
+  if (dst_inplace == false)
+    {
+      if (this->ghosted_level_vector[from_level - 1].get_partitioner().get() !=
+          this->vector_partitioners[from_level - 1].get())
+        this->ghosted_level_vector[from_level - 1].reinit(
+          this->vector_partitioners[from_level - 1]);
+      AssertDimension(this->ghosted_level_vector[from_level - 1].local_size(),
+                      dst.local_size());
+      this->ghosted_level_vector[from_level - 1] = 0.;
+    }

-  this->ghosted_level_vector[from_level].copy_locally_owned_data_from(src);
-  this->ghosted_level_vector[from_level].update_ghost_values();
-  this->ghosted_level_vector[from_level - 1] = 0.;
+  const LinearAlgebra::distributed::Vector<Number> &src_vec =
+    src_inplace ? src : this->ghosted_level_vector[from_level];
+  LinearAlgebra::distributed::Vector<Number> &dst_vec =
+    dst_inplace ? dst : this->ghosted_level_vector[from_level - 1];
+
+  src_vec.update_ghost_values();

   if (fe_degree == 0)
-    do_restrict_add<0>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<0>(from_level, dst_vec, src_vec);
   else if (fe_degree == 1)
-    do_restrict_add<1>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<1>(from_level, dst_vec, src_vec);
   else if (fe_degree == 2)
-    do_restrict_add<2>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<2>(from_level, dst_vec, src_vec);
   else if (fe_degree == 3)
-    do_restrict_add<3>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<3>(from_level, dst_vec, src_vec);
   else if (fe_degree == 4)
-    do_restrict_add<4>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<4>(from_level, dst_vec, src_vec);
   else if (fe_degree == 5)
-    do_restrict_add<5>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<5>(from_level, dst_vec, src_vec);
   else if (fe_degree == 6)
-    do_restrict_add<6>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<6>(from_level, dst_vec, src_vec);
   else if (fe_degree == 7)
-    do_restrict_add<7>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<7>(from_level, dst_vec, src_vec);
   else if (fe_degree == 8)
-    do_restrict_add<8>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<8>(from_level, dst_vec, src_vec);
   else if (fe_degree == 9)
-    do_restrict_add<9>(from_level,
-                       this->ghosted_level_vector[from_level - 1],
-                       this->ghosted_level_vector[from_level]);
+    do_restrict_add<9>(from_level, dst_vec, src_vec);
   else if (fe_degree == 10)
-    do_restrict_add<10>(from_level,
-                        this->ghosted_level_vector[from_level - 1],
-                        this->ghosted_level_vector[from_level]);
+    do_restrict_add<10>(from_level, dst_vec, src_vec);
   else
     // go to the non-templated version of the evaluator
-    do_restrict_add<-1>(from_level,
-                        this->ghosted_level_vector[from_level - 1],
-                        this->ghosted_level_vector[from_level]);
+    do_restrict_add<-1>(from_level, dst_vec, src_vec);
+
+  dst_vec.compress(VectorOperation::add);
+  if (dst_inplace == false)
+    dst += dst_vec;

-  this->ghosted_level_vector[from_level - 1].compress(VectorOperation::add);
-  dst += this->ghosted_level_vector[from_level - 1];
+  if (src_inplace == true)
+    src.zero_out_ghosts();
 }
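
For illustration, a caller could hand the transfer the partitioners that its level operators already use, so that prolongation and restriction act in place on vectors with that layout. The sketch below is not part of this patch; the per-level MatrixFree objects (mg_matrix_free), the DoFHandler (dof_handler), and the function name are assumptions made only for the example, while MGTransferMatrixFree::build(), MGTransferMatrixFree::initialize_constraints(), and MatrixFree::get_vector_partitioner() are existing deal.II interfaces.

// Minimal usage sketch for the new build() interface, assuming per-level
// MatrixFree objects are already set up by the surrounding program.
#include <deal.II/base/mg_level_object.h>
#include <deal.II/base/partitioner.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/multigrid/mg_constrained_dofs.h>
#include <deal.II/multigrid/mg_transfer_matrix_free.h>

#include <memory>
#include <vector>

using namespace dealii;

template <int dim, typename Number>
void
build_transfer_with_external_partitioners(
  const DoFHandler<dim> &                       dof_handler,
  const MGConstrainedDoFs &                     mg_constrained_dofs,
  const MGLevelObject<MatrixFree<dim, Number>> &mg_matrix_free,
  MGTransferMatrixFree<dim, Number> &           transfer)
{
  // Collect the partitioner each level operator already uses for its
  // ghosted level vectors.
  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners(
    dof_handler.get_triangulation().n_global_levels());
  for (unsigned int level = 0; level < partitioners.size(); ++level)
    partitioners[level] = mg_matrix_free[level].get_vector_partitioner();

  transfer.initialize_constraints(mg_constrained_dofs);

  // Suggest the collected partitioners to the transfer. If a suggested
  // partitioner misses ghost entries required by the transfer on some
  // level, build() falls back to an internally created partitioner for
  // that level, so the call remains valid in either case.
  transfer.build(dof_handler, partitioners);
}

When the suggested partitioners are accepted, the ghosted level vectors kept by the transfer are released for those levels and the intermediate copy in prolongate()/restrict_and_add() is skipped, which is the saving described in the build() documentation above.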