From 70e2340bb37de0388c46f31c874a26805847ae5e Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Tue, 24 Oct 2017 12:38:27 +0200
Subject: [PATCH] Avoid VectorView in favor of copy_locally_owned_data_from.
 Optimize copy_to/from_mg for another frequent case.

---
 include/deal.II/multigrid/mg_transfer.h       |  8 ++++
 .../deal.II/multigrid/mg_transfer.templates.h | 42 ++++++++++++-------
 source/multigrid/mg_level_global_transfer.cc  | 25 +++++++----
 3 files changed, 53 insertions(+), 22 deletions(-)

diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h
index 99fa1d8085..8b616434db 100644
--- a/include/deal.II/multigrid/mg_transfer.h
+++ b/include/deal.II/multigrid/mg_transfer.h
@@ -447,6 +447,14 @@ protected:
    */
   bool perform_plain_copy;
 
+  /**
+   * Stores whether the copy operation from the global to the level vector is
+   * actually a plain copy to the finest level except for a renumbering
+   * within the finest level of the degrees of freedom. This means that the
+   * grid has no adaptive refinement.
+   */
+  bool perform_renumbered_plain_copy;
+
   /**
    * The vector that stores what has been given to the
    * set_component_to_block_map() function.
diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index 2346866b7b..d7ba1b96b9 100644
--- a/include/deal.II/multigrid/mg_transfer.templates.h
+++ b/include/deal.II/multigrid/mg_transfer.templates.h
@@ -376,7 +376,7 @@ template <typename Number>
 template <int dim, typename Number2, int spacedim>
 void
 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_to_mg
-(const DoFHandler<dim,spacedim> &mg_dof_handler,
+(const DoFHandler<dim,spacedim> &mg_dof_handler,
  MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &dst,
  const LinearAlgebra::distributed::Vector<Number2> &src,
  const bool solution_transfer) const
@@ -394,12 +394,17 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_to_mg
 
   if (perform_plain_copy)
     {
-      // In this case, we can simply copy the local range (in parallel by
-      // VectorView)
+      // In this case, we can simply copy the local range.
       AssertDimension(dst[dst.max_level()].local_size(), src.local_size());
-      VectorView<Number>  dst_view (src.local_size(), dst[dst.max_level()].begin());
-      VectorView<Number2> src_view (src.local_size(), src.begin());
-      static_cast<Vector<Number> &>(dst_view) = static_cast<Vector<Number2> &>(src_view);
+      dst[dst.max_level()].copy_locally_owned_data_from(src);
+      return;
+    }
+  else if (perform_renumbered_plain_copy)
+    {
+      Assert(copy_indices_level_mine.back().empty(), ExcInternalError());
+      LinearAlgebra::distributed::Vector<Number> &dst_level = dst[dst.max_level()];
+      for (std::pair<unsigned int, unsigned int> i : this_copy_indices.back())
+        dst_level.local_element(i.second) = src.local_element(i.first);
       return;
     }
 
@@ -440,7 +445,7 @@ template <typename Number>
 template <int dim, typename Number2, int spacedim>
 void
 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_from_mg
-(const DoFHandler<dim,spacedim> &mg_dof_handler,
+(const DoFHandler<dim,spacedim> &mg_dof_handler,
  LinearAlgebra::distributed::Vector<Number2> &dst,
  const MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &src) const
 {
@@ -449,14 +454,21 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_from_mg
   AssertIndexRange(src.min_level(), src.max_level()+1);
   if (perform_plain_copy)
     {
-      // In this case, we can simply copy the local range (in parallel by
-      // VectorView). To avoid having stray data in ghost entries of the
-      // destination, make sure to clear them here.
+      // In this case, we can simply copy the local range. To avoid having
+      // stray data in ghost entries of the destination, make sure to clear
+      // them here.
+      dst.zero_out_ghosts();
+      AssertDimension(src[src.max_level()].local_size(), dst.local_size());
+      dst.copy_locally_owned_data_from(src[src.max_level()]);
+      return;
+    }
+  else if (perform_renumbered_plain_copy)
+    {
+      Assert(copy_indices_global_mine.back().empty(), ExcInternalError());
+      const LinearAlgebra::distributed::Vector<Number> &src_level = src[src.max_level()];
       dst.zero_out_ghosts();
-      AssertDimension(dst.local_size(), src[src.max_level()].local_size());
-      VectorView<Number2> dst_view (dst.local_size(), dst.begin());
-      VectorView<Number>  src_view (dst.local_size(), src[src.max_level()].begin());
-      static_cast<Vector<Number2> &>(dst_view) = static_cast<Vector<Number> &>(src_view);
+      for (std::pair<unsigned int, unsigned int> i : copy_indices.back())
+        dst.local_element(i.first) = src_level.local_element(i.second);
       return;
     }
 
@@ -501,7 +513,7 @@ template <typename Number>
 template <int dim, typename Number2, int spacedim>
 void
 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_from_mg_add
-(const DoFHandler<dim,spacedim> &/*mg_dof_handler*/,
+(const DoFHandler<dim,spacedim> &/*mg_dof_handler*/,
  LinearAlgebra::distributed::Vector<Number2> &dst,
  const MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &src) const
 {
diff --git a/source/multigrid/mg_level_global_transfer.cc b/source/multigrid/mg_level_global_transfer.cc
index d7c8ffdb3f..e6eaf239ed 100644
--- a/source/multigrid/mg_level_global_transfer.cc
+++ b/source/multigrid/mg_level_global_transfer.cc
@@ -51,7 +51,7 @@ MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices
                     copy_indices_global_mine, copy_indices_level_mine);
 
   // check if we can run a plain copy operation between the global DoFs and
-  // the finest level, on the current processor.
+  // the finest level.
   bool my_perform_plain_copy =
     (copy_indices.back().size() ==
      mg_dof.locally_owned_dofs().n_elements())
@@ -100,6 +100,7 @@ MGLevelGlobalTransfer<VectorType>::clear()
   copy_indices_level_mine.clear();
   component_to_block_map.resize(0);
   mg_constrained_dofs = nullptr;
+  perform_plain_copy = false;
 }
 
 
@@ -286,10 +287,12 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::fill_and_com
                 solution_ghosted_global_vector,
                 solution_ghosted_level_vector);
 
-  perform_plain_copy = this->copy_indices.back().size()
-                       == mg_dof.locally_owned_dofs().n_elements();
-  if (perform_plain_copy)
+  bool my_perform_renumbered_plain_copy = (this->copy_indices.back().size() ==
+                                           mg_dof.locally_owned_dofs().n_elements());
+  bool my_perform_plain_copy = false;
+  if (my_perform_renumbered_plain_copy)
     {
+      my_perform_plain_copy = true;
       AssertDimension(this->copy_indices_global_mine.back().size(), 0);
       AssertDimension(this->copy_indices_level_mine.back().size(), 0);
 
@@ -300,16 +303,22 @@
         if (this->copy_indices.back()[i].first !=
             this->copy_indices.back()[i].second)
           {
-            perform_plain_copy = false;
+            my_perform_plain_copy = false;
             break;
           }
     }
+
+  // now do a global reduction over all processors to see what operation
+  // they can agree upon
   perform_plain_copy =
-    Utilities::MPI::min(static_cast<int>(perform_plain_copy),
+    Utilities::MPI::min(static_cast<int>(my_perform_plain_copy),
+                        mpi_communicator);
+  perform_renumbered_plain_copy =
+    Utilities::MPI::min(static_cast<int>(my_perform_renumbered_plain_copy),
                         mpi_communicator);
 
   // if we do a plain copy, no need to hold additional ghosted vectors
-  if (perform_plain_copy)
+  if (perform_renumbered_plain_copy)
     {
       ghosted_global_vector.reinit(0);
       ghosted_level_vector.resize(0, 0);
@@ -332,6 +341,8 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::clear()
   mg_constrained_dofs = nullptr;
   ghosted_global_vector.reinit(0);
   ghosted_level_vector.resize(0, 0);
+  perform_plain_copy = false;
+  perform_renumbered_plain_copy = false;
 }
 
 
-- 
2.39.5
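
Editor's note: the sketch below, which is not part of the commit, condenses the two
fast paths the patch adds to copy_to_mg/copy_from_mg into a single free function.
The function name copy_to_finest_level and its parameter list are hypothetical;
copy_locally_owned_data_from(), local_element(), and the (global index, level index)
layout of the index pairs follow the patch itself.

#include <deal.II/lac/la_parallel_vector.h>

#include <utility>
#include <vector>

template <typename Number>
void copy_to_finest_level
(const std::vector<std::pair<unsigned int, unsigned int> > &copy_indices,
 const bool perform_plain_copy,
 const bool perform_renumbered_plain_copy,
 const dealii::LinearAlgebra::distributed::Vector<Number> &src,
 dealii::LinearAlgebra::distributed::Vector<Number> &dst_level)
{
  if (perform_plain_copy)
    {
      // The index map is the identity: one bulk copy of the locally owned
      // range, with no per-element indirection and no communication.
      dst_level.copy_locally_owned_data_from(src);
    }
  else if (perform_renumbered_plain_copy)
    {
      // Each processor owns the same data, merely renumbered within the
      // finest level: copy element by element through the index pairs
      // (first = local index in the global vector, second = local index in
      // the level vector).
      for (const std::pair<unsigned int, unsigned int> &i : copy_indices)
        dst_level.local_element(i.second) = src.local_element(i.first);
    }
  // Otherwise the general path of the patch applies, using the ghosted
  // global and level vectors and MPI communication.
}

Both flags are only usable when every rank can take the same path, which is why the
patch reduces the locally computed flags with
Utilities::MPI::min(static_cast<int>(flag), mpi_communicator) before storing them.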