From ca18f5aaccc84c7c09b39df8891a6922632be633 Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Tue, 26 Mar 2019 22:10:05 -0400
Subject: [PATCH] Rename parallel::distributed::SolutionTransfer::prepare_serialization()

---
 doc/news/changes/minor/20190326Arndt        |  5 +++
 .../deal.II/distributed/solution_transfer.h | 32 +++++++++++++++----
 source/distributed/solution_transfer.cc     | 23 ++++++++++++-
 tests/mpi/p4est_save_02.cc                  |  2 +-
 tests/mpi/p4est_save_03.cc                  |  4 +--
 tests/mpi/p4est_save_04.cc                  |  2 +-
 tests/mpi/p4est_save_06.cc                  |  2 +-
 7 files changed, 57 insertions(+), 13 deletions(-)
 create mode 100644 doc/news/changes/minor/20190326Arndt

diff --git a/doc/news/changes/minor/20190326Arndt b/doc/news/changes/minor/20190326Arndt
new file mode 100644
index 0000000000..8e9c7ef382
--- /dev/null
+++ b/doc/news/changes/minor/20190326Arndt
@@ -0,0 +1,5 @@
+Changed: parallel::distributed::SolutionTransfer::prepare_serialization() has
+been deprecated in favor of
+parallel::distributed::SolutionTransfer::prepare_for_serialization().
+<br>
+(Daniel Arndt, 2019/03/26)

diff --git a/include/deal.II/distributed/solution_transfer.h b/include/deal.II/distributed/solution_transfer.h
--- a/include/deal.II/distributed/solution_transfer.h
+++ b/include/deal.II/distributed/solution_transfer.h
    * <h3>Note on ghost elements</h3>
    * In a parallel computation PETSc or
    * Trilinos vector may contain ghost elements or not. For reading in
    * information with prepare_for_coarsening_and_refinement() or
-   * prepare_serialization() you need to supply vectors with ghost elements,
-   * so that all locally_active elements can be read. On the other hand,
-   * ghosted vectors are generally not writable, so for calls to
+   * prepare_for_serialization() you need to supply vectors with ghost
+   * elements, so that all locally_active elements can be read. On the other
+   * hand, ghosted vectors are generally not writable, so for calls to
    * interpolate() or deserialize() you need to supply distributed vectors
    * without ghost elements. More precisely, during interpolation the
    * current algorithm writes into all locally active degrees of freedom.
@@ -133,7 +133,7 @@ namespace parallel
    * @code
    * parallel::distributed::SolutionTransfer<dim, VectorType>
    *   sol_trans(dof_handler);
-   * sol_trans.prepare_serialization (vector);
+   * sol_trans.prepare_for_serialization (vector);
    *
    * triangulation.save(filename);
    * @endcode
@@ -186,7 +186,7 @@ namespace parallel
    *   sol_trans(hp_dof_handler);
    *
    * hp_dof_handler.prepare_for_serialization_of_active_fe_indices();
-   * sol_trans.prepare_serialization(vector);
+   * sol_trans.prepare_for_serialization(vector);
    *
    * triangulation.save(filename);
    * @endcode
@@ -292,24 +292,42 @@ namespace parallel
       void
       interpolate(VectorType &out);
 
+      /**
+       * Prepare the serialization of the given vector. The serialization is
+       * done by Triangulation::save(). The given vector needs all information
+       * on the locally active DoFs (it must be ghosted). See documentation of
+       * this class for more information.
+       */
+      void
+      prepare_for_serialization(const VectorType &in);
+
+      /**
+       * Same as the function above, only for a list of vectors.
+       */
+      void
+      prepare_for_serialization(const std::vector<const VectorType *> &all_in);
 
       /**
        * Prepare the serialization of the given vector. The serialization is
        * done by Triangulation::save(). The given vector needs all information
        * on the locally active DoFs (it must be ghosted). See documentation of
        * this class for more information.
+       *
+       * @deprecated Use parallel::distributed::SolutionTransfer::prepare_for_serialization() instead.
        */
+      DEAL_II_DEPRECATED
       void
       prepare_serialization(const VectorType &in);
-
       /**
        * Same as the function above, only for a list of vectors.
+       *
+       * @deprecated Use parallel::distributed::SolutionTransfer::prepare_for_serialization() instead.
        */
+      DEAL_II_DEPRECATED
       void
       prepare_serialization(const std::vector<const VectorType *> &all_in);
-
       /**
        * Execute the deserialization of the given vector. This needs to be
        * done after calling Triangulation::load(). The given vector must be a

diff --git a/source/distributed/solution_transfer.cc b/source/distributed/solution_transfer.cc
index 4c4f5d9f26..0919d7c798 100644
--- a/source/distributed/solution_transfer.cc
+++ b/source/distributed/solution_transfer.cc
@@ -109,13 +109,34 @@ namespace parallel
 
 
+    template <int dim, typename VectorType, typename DoFHandlerType>
+    void
+    SolutionTransfer<dim, VectorType, DoFHandlerType>::
+      prepare_for_serialization(const VectorType &in)
+    {
+      std::vector<const VectorType *> all_in(1, &in);
+      prepare_for_serialization(all_in);
+    }
+
+
+
+    template <int dim, typename VectorType, typename DoFHandlerType>
+    void
+    SolutionTransfer<dim, VectorType, DoFHandlerType>::
+      prepare_for_serialization(const std::vector<const VectorType *> &all_in)
+    {
+      prepare_for_coarsening_and_refinement(all_in);
+    }
+
+
     template <int dim, typename VectorType, typename DoFHandlerType>
     void
     SolutionTransfer<dim, VectorType, DoFHandlerType>::prepare_serialization(
       const VectorType &in)
     {
       std::vector<const VectorType *> all_in(1, &in);
-      prepare_serialization(all_in);
+      prepare_for_serialization(all_in);
     }
 
diff --git a/tests/mpi/p4est_save_02.cc b/tests/mpi/p4est_save_02.cc
index 1ab383a4d7..be0738f39f 100644
--- a/tests/mpi/p4est_save_02.cc
+++ b/tests/mpi/p4est_save_02.cc
@@ -97,7 +97,7 @@ test()
   x.compress(VectorOperation::insert);
   solution = x;
 
-  soltrans.prepare_serialization(solution);
+  soltrans.prepare_for_serialization(solution);
 
   tr.save(filename.c_str());
 
diff --git a/tests/mpi/p4est_save_03.cc b/tests/mpi/p4est_save_03.cc
index 9780e2dfae..4619f2ad75 100644
--- a/tests/mpi/p4est_save_03.cc
+++ b/tests/mpi/p4est_save_03.cc
@@ -108,8 +108,8 @@ test()
 
   solution2 = x;
 
-  soltrans.prepare_serialization(solution);
-  soltrans2.prepare_serialization(solution2);
+  soltrans.prepare_for_serialization(solution);
+  soltrans2.prepare_for_serialization(solution2);
 
   tr.save(filename.c_str());
 
diff --git a/tests/mpi/p4est_save_04.cc b/tests/mpi/p4est_save_04.cc
index c908edf673..0526ce29c2 100644
--- a/tests/mpi/p4est_save_04.cc
+++ b/tests/mpi/p4est_save_04.cc
@@ -100,7 +100,7 @@ test()
   x.compress(VectorOperation::insert);
   rel_x = x;
 
-  soltrans.prepare_serialization(rel_x);
+  soltrans.prepare_for_serialization(rel_x);
 
   tr.save("file");
   // tr.write_mesh_vtk("before");
diff --git a/tests/mpi/p4est_save_06.cc b/tests/mpi/p4est_save_06.cc
index 453ed58389..15025baeae 100644
--- a/tests/mpi/p4est_save_06.cc
+++ b/tests/mpi/p4est_save_06.cc
@@ -107,7 +107,7 @@ test()
   rel_x = x;
 
   dh.prepare_for_serialization_of_active_fe_indices();
-  soltrans.prepare_serialization(rel_x);
+  soltrans.prepare_for_serialization(rel_x);
 
   tr.save("file");
   deallog << "#cells: " << tr.n_global_active_cells() << std::endl
-- 
2.39.5
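
Note (not part of the patch): the sketch below shows how the renamed API is used for a save/restart cycle, pieced together from the @code snippets in the documented header above. The mesh setup, FE_Q(1) element, the file name "checkpoint", and the choice of LinearAlgebra::distributed::Vector are illustrative assumptions, not taken from the patch.

```cpp
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/lac/la_parallel_vector.h>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const MPI_Comm                   comm = MPI_COMM_WORLD;

  using VectorType = LinearAlgebra::distributed::Vector<double>;

  // --- Saving a checkpoint ------------------------------------------------
  parallel::distributed::Triangulation<2> triangulation(comm);
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  IndexSet locally_relevant_dofs;
  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

  // Ghosted vector: prepare_for_serialization() reads all locally active
  // entries, so ghost values must be available (see the ghost-element note).
  VectorType solution(dof_handler.locally_owned_dofs(),
                      locally_relevant_dofs,
                      comm);
  // ... fill the locally owned entries of `solution` here ...
  solution.update_ghost_values();

  parallel::distributed::SolutionTransfer<2, VectorType> sol_trans(dof_handler);
  // New spelling introduced by this patch; the old prepare_serialization()
  // still compiles but is marked DEAL_II_DEPRECATED.
  sol_trans.prepare_for_serialization(solution);
  triangulation.save("checkpoint");

  // --- Restarting from the checkpoint ---------------------------------------
  parallel::distributed::Triangulation<2> tria_restart(comm);
  GridGenerator::hyper_cube(tria_restart); // same coarse mesh as before
  tria_restart.load("checkpoint");

  DoFHandler<2> dof_handler_restart(tria_restart);
  dof_handler_restart.distribute_dofs(fe);

  // Deserialization writes into the vector, so it must not be ghosted.
  VectorType restored(dof_handler_restart.locally_owned_dofs(), comm);
  parallel::distributed::SolutionTransfer<2, VectorType> sol_trans_restart(
    dof_handler_restart);
  sol_trans_restart.deserialize(restored);
}
```

As in the test changes above, callers only need to swap prepare_serialization() for prepare_for_serialization(); the save/load and deserialize() calls are unchanged.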