From 9f87174df80dded38bc80aefe428d3607ba91342 Mon Sep 17 00:00:00 2001
From: Sebastian Kinnewig
Date: Thu, 15 Feb 2024 16:51:43 +0100
Subject: [PATCH] Declare conversion assignments for Tpetra vectors.

---
 include/deal.II/lac/trilinos_tpetra_vector.h  |  10 ++
 .../lac/trilinos_tpetra_vector.templates.h    |  24 ++++
 include/deal.II/lac/vector.h                  |  53 ++++++++
 include/deal.II/lac/vector.templates.h        | 120 ++++++++++++++++++
 source/lac/trilinos_tpetra_vector.cc          |  10 ++
 source/lac/vector.cc                          |  36 ++++++
 6 files changed, 253 insertions(+)

diff --git a/include/deal.II/lac/trilinos_tpetra_vector.h b/include/deal.II/lac/trilinos_tpetra_vector.h
index 0b9a2d3c16..337305feea 100644
--- a/include/deal.II/lac/trilinos_tpetra_vector.h
+++ b/include/deal.II/lac/trilinos_tpetra_vector.h
@@ -27,6 +27,7 @@
 #  include <deal.II/lac/read_vector.h>
 #  include <deal.II/lac/trilinos_tpetra_communication_pattern.h>
+#  include <deal.II/lac/vector.h>
 #  include <deal.II/lac/vector_operation.h>
 
 #  include <Teuchos_Comm.hpp>
@@ -401,6 +402,15 @@ namespace LinearAlgebra
       Vector &
       operator=(const Vector &V);
 
+      /**
+       * Copy function. This function takes a Vector and copies all the
+       * elements. The Vector will have the same parallel distribution as
+       * @p V.
+       */
+      template <typename OtherNumber>
+      Vector &
+      operator=(const dealii::Vector<OtherNumber> &V);
+
       /**
        * Sets all elements of the vector to the scalar @p s. This operation is
        * only allowed if @p s is equal to zero.
diff --git a/include/deal.II/lac/trilinos_tpetra_vector.templates.h b/include/deal.II/lac/trilinos_tpetra_vector.templates.h
index db6cc93ed5..611454d08d 100644
--- a/include/deal.II/lac/trilinos_tpetra_vector.templates.h
+++ b/include/deal.II/lac/trilinos_tpetra_vector.templates.h
@@ -411,6 +411,30 @@ namespace LinearAlgebra
 
 
 
+    template <typename Number, typename MemorySpace>
+    template <typename OtherNumber>
+    Vector<Number, MemorySpace> &
+    Vector<Number, MemorySpace>::operator=(const dealii::Vector<OtherNumber> &V)
+    {
+      static_assert(
+        std::is_same<Number, OtherNumber>::value,
+        "TpetraWrappers::Vector and dealii::Vector must use the same number type here.");
+
+      vector.reset();
+      nonlocal_vector.reset();
+
+      Teuchos::Array<Number> vector_data(V.begin(), V.end());
+      vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
+        V.locally_owned_elements().make_tpetra_map_rcp(), vector_data);
+
+      has_ghost  = false;
+      compressed = true;
+
+      return *this;
+    }
+
+
+
     template <typename Number, typename MemorySpace>
     Vector<Number, MemorySpace> &
     Vector<Number, MemorySpace>::operator=(const Number s)
diff --git a/include/deal.II/lac/vector.h b/include/deal.II/lac/vector.h
index 92edce0202..c4e55f0611 100644
--- a/include/deal.II/lac/vector.h
+++ b/include/deal.II/lac/vector.h
@@ -58,6 +58,17 @@ namespace TrilinosWrappers
 } // namespace TrilinosWrappers
 #  endif
 
+#  ifdef DEAL_II_TRILINOS_WITH_TPETRA
+namespace LinearAlgebra
+{
+  namespace TpetraWrappers
+  {
+    template <typename Number, typename MemorySpace>
+    class Vector;
+  }
+} // namespace LinearAlgebra
+#  endif
+
 template <typename number>
 class LAPACKFullMatrix;
 
@@ -234,6 +245,26 @@ public:
   explicit Vector(const TrilinosWrappers::MPI::Vector &v);
 #endif
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+  /**
+   * Another copy constructor: copy the values from a Trilinos wrapper
+   * vector. This copy constructor is only available if Trilinos was
+   * detected during configuration time.
+   *
+   * @note Due to the communication model used in MPI, this operation can
+   * only succeed if all processes that have knowledge of @p v
+   * (i.e. those given by <code>v.get_mpi_communicator()</code>) do it at
+   * the same time. This means that unless you use a split MPI communicator
+   * then it is not normally possible for only one or a subset of processes
+   * to obtain a copy of a parallel vector while the other jobs do something
+   * else. In other words, calling this function is a
+   * @ref GlossCollectiveOperation "collective operation"
+   * that needs to be executed by all MPI processes that jointly share @p v.
+   */
+  template <typename OtherNumber, typename MemorySpace>
+  explicit Vector(
+    const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v);
+#endif
+
   /**
    * Constructor. Set dimension to @p n and initialize all elements with zero.
    *
@@ -440,6 +471,28 @@ public:
   operator=(const TrilinosWrappers::MPI::Vector &v);
 #endif
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+  /**
+   * Another copy operator: copy the values from a (sequential or parallel,
+   * depending on the underlying Trilinos installation) Trilinos wrapper
+   * vector class. This operator is only available if Trilinos was detected
+   * during configuration time.
+   *
+   * @note Due to the communication model used in MPI, this operation can
+   * only succeed if all processes that have knowledge of @p v
+   * (i.e. those given by <code>v.get_mpi_communicator()</code>) do it at
+   * the same time. This means that unless you use a split MPI communicator
+   * then it is not normally possible for only one or a subset of processes
+   * to obtain a copy of a parallel vector while the other jobs do something
+   * else. In other words, calling this function is a
+   * @ref GlossCollectiveOperation "collective operation"
+   * that needs to be executed by all MPI processes that jointly share @p v.
+   */
+  template <typename OtherNumber, typename MemorySpace>
+  Vector<Number> &
+  operator=(
+    const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v);
+#endif
+
   /**
    * Test for equality. This function assumes that the present vector and the
    * one to compare with have the same size already, since comparing vectors
diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h
index 4dbc78bc32..c935cea81e 100644
--- a/include/deal.II/lac/vector.templates.h
+++ b/include/deal.II/lac/vector.templates.h
@@ -35,6 +35,10 @@
 #  include <deal.II/lac/trilinos_vector.h>
 #endif
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+#  include <deal.II/lac/trilinos_tpetra_vector.h>
+#endif
+
 #include <algorithm>
 #include <cmath>
@@ -178,6 +182,60 @@ Vector<Number>::Vector(const TrilinosWrappers::MPI::Vector &v)
 #endif
 
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+template <typename Number>
+template <typename OtherNumber, typename MemorySpace>
+Vector<Number>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v)
+  : values(v.size())
+{
+  static_assert(
+    std::is_same<Number, OtherNumber>::value,
+    "TpetraWrappers::Vector and dealii::Vector must use the same number type here.");
+
+  if (size() != 0)
+    {
+      // Copy the distributed vector to a local one at all processors
+      // that know about the original vector.
+      typename LinearAlgebra::TpetraWrappers::Vector<OtherNumber,
+                                                     MemorySpace>::VectorType
+        localized_vector(complete_index_set(size()).make_tpetra_map_rcp(),
+                         v.get_mpi_communicator());
+
+      Teuchos::RCP<typename LinearAlgebra::TpetraWrappers::
+                     Vector<OtherNumber, MemorySpace>::ImportType>
+        importer = Tpetra::createImport(v.trilinos_vector().getMap(),
+                                        localized_vector.getMap());
+
+      localized_vector.doImport(v.trilinos_vector(), *importer, Tpetra::INSERT);
+
+      // Get a Kokkos view from the localized_vector.
+#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>(
+          Tpetra::Access::ReadOnly);
+#  else
+      localized_vector.template sync<Kokkos::HostSpace>();
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>();
+#  endif
+      auto localized_vector_1d =
+        Kokkos::subview(localized_vector_2d, Kokkos::ALL(), 0);
+      const size_t local_length = localized_vector.getLocalLength();
+
+      Kokkos::DefaultHostExecutionSpace exec;
+      Kokkos::View<Number *, Kokkos::HostSpace> values_view(values.data(),
+                                                            local_length);
+      Kokkos::deep_copy(exec, values_view, localized_vector_1d);
+      exec.fence();
+    }
+}
+
+#endif
+
+
 template <typename Number>
 inline Vector<Number> &
 Vector<Number>::operator=(const Vector<Number> &v)
@@ -809,6 +867,68 @@ Vector<Number>::operator=(const TrilinosWrappers::MPI::Vector &v)
 #endif
 
 
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+template <typename Number>
+template <typename OtherNumber, typename MemorySpace>
+Vector<Number> &
+Vector<Number>::operator=(
+  const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v)
+{
+  static_assert(
+    std::is_same<Number, OtherNumber>::value,
+    "TpetraWrappers::Vector and dealii::Vector must use the same number type here.");
+
+  if (v.size() != size())
+    reinit(v.size(), true);
+
+  if (size() != 0)
+    {
+      // Copy the distributed vector to a local one at all processors
+      // that know about the original vector.
+      typename LinearAlgebra::TpetraWrappers::Vector<OtherNumber,
+                                                     MemorySpace>::VectorType
+        localized_vector(complete_index_set(size()).make_tpetra_map_rcp(),
+                         v.get_mpi_communicator());
+
+      Teuchos::RCP<typename LinearAlgebra::TpetraWrappers::
+                     Vector<OtherNumber, MemorySpace>::ImportType>
+        importer = Tpetra::createImport(v.trilinos_vector().getMap(),
+                                        localized_vector.getMap());
+
+      localized_vector.doImport(v.trilinos_vector(), *importer, Tpetra::INSERT);
+
+      // Get a Kokkos view from the localized_vector.
+#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>(
+          Tpetra::Access::ReadOnly);
+#  else
+      localized_vector.template sync<Kokkos::HostSpace>();
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>();
+#  endif
+      auto localized_vector_1d =
+        Kokkos::subview(localized_vector_2d, Kokkos::ALL(), 0);
+      const size_t local_length = localized_vector.getLocalLength();
+
+      Kokkos::DefaultHostExecutionSpace exec;
+      Kokkos::View<Number *, Kokkos::HostSpace> values_view(values.data(),
+                                                            local_length);
+      Kokkos::deep_copy(exec, values_view, localized_vector_1d);
+      exec.fence();
+    }
+
+  return *this;
+}
+
+#endif
+
+
+
 template <typename Number>
 template <typename Number2>
 bool
diff --git a/source/lac/trilinos_tpetra_vector.cc b/source/lac/trilinos_tpetra_vector.cc
index f858d8d0bf..db70885df0 100644
--- a/source/lac/trilinos_tpetra_vector.cc
+++ b/source/lac/trilinos_tpetra_vector.cc
@@ -36,6 +36,8 @@ namespace LinearAlgebra
     static_assert(concepts::is_vector_space_vector<Vector<float>>);
 #  endif
     template class Vector<float>;
+    template Vector<float> &
+    Vector<float>::operator=<float>(const dealii::Vector<float> &);
     namespace internal
     {
       template class VectorReference<float>;
@@ -47,6 +49,8 @@
     static_assert(concepts::is_vector_space_vector<Vector<double>>);
 #  endif
     template class Vector<double>;
+    template Vector<double> &
+    Vector<double>::operator=<double>(const dealii::Vector<double> &);
     namespace internal
     {
       template class VectorReference<double>;
@@ -60,6 +64,9 @@
     static_assert(concepts::is_vector_space_vector<
                   Vector<std::complex<float>>>);
 #  endif
     template class Vector<std::complex<float>>;
+    template Vector<std::complex<float>> &
+    Vector<std::complex<float>>::operator=<std::complex<float>>(
+      const dealii::Vector<std::complex<float>> &);
     namespace internal
     {
       template class VectorReference<std::complex<float>>;
@@ -72,6 +79,9 @@
     static_assert(concepts::is_vector_space_vector<
                   Vector<std::complex<double>>>);
 #  endif
     template class Vector<std::complex<double>>;
+    template Vector<std::complex<double>> &
+    Vector<std::complex<double>>::operator=<std::complex<double>>(
+      const dealii::Vector<std::complex<double>> &);
     namespace internal
     {
       template class VectorReference<std::complex<double>>;
diff --git a/source/lac/vector.cc b/source/lac/vector.cc
index 54d97765c1..3374137809 100644
--- a/source/lac/vector.cc
+++ b/source/lac/vector.cc
@@ -83,4 +83,40 @@ Vector<int>::lp_norm(const real_type) const
   return -1;
 }
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+#  ifdef HAVE_TPETRA_INST_FLOAT
+template Vector<float>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<float> &);
+template Vector<float> &
+Vector<float>::operator=<float>(
+  const LinearAlgebra::TpetraWrappers::Vector<float> &);
+#  endif
+
+#  ifdef HAVE_TPETRA_INST_DOUBLE
+template Vector<double>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<double> &);
+template Vector<double> &
+Vector<double>::operator=<double>(
+  const LinearAlgebra::TpetraWrappers::Vector<double> &);
+#  endif
+
+#  ifdef DEAL_II_WITH_COMPLEX_VALUES
+#    ifdef HAVE_TPETRA_INST_COMPLEX_FLOAT
+template Vector<std::complex<float>>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<float>> &);
+template Vector<std::complex<float>> &
+Vector<std::complex<float>>::operator=<std::complex<float>>(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<float>> &);
+#    endif
+
+#    ifdef HAVE_TPETRA_INST_COMPLEX_DOUBLE
+template Vector<std::complex<double>>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<double>> &);
+template Vector<std::complex<double>> &
+Vector<std::complex<double>>::operator=<std::complex<double>>(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<double>> &);
+#    endif
+#  endif
+#endif
+
 DEAL_II_NAMESPACE_CLOSE
-- 
2.39.5
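
Usage sketch (not part of the patch): the following exercises both new
conversion directions. It assumes the existing IndexSet-plus-communicator
constructor of LinearAlgebra::TpetraWrappers::Vector; the function name and
sizes are invented for illustration. As the new documentation states, the
dealii::Vector side of the conversion is collective over the communicator of
the distributed vector.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Exercise both conversion directions added by this patch.
void round_trip(const MPI_Comm communicator)
{
  const unsigned int n = 16;

  // A serial vector, identical on every rank.
  Vector<double> serial(n);
  for (unsigned int i = 0; i < n; ++i)
    serial(i) = i;

  // The new TpetraWrappers::Vector::operator= rebuilds the Tpetra layout
  // from serial.locally_owned_elements(), i.e. all elements here.
  LinearAlgebra::TpetraWrappers::Vector<double> distributed(
    complete_index_set(n), communicator);
  distributed = serial;

  // The new conversion constructor and copy operator localize the
  // distributed vector again; both are collective over 'communicator'.
  Vector<double> localized(distributed);
  localized = distributed;

  AssertDimension(localized.size(), serial.size());
}

Because of the static_assert added by the patch, mixing number types (for
example, assigning a dealii::Vector<double> to a
TpetraWrappers::Vector<float>) is rejected at compile time.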
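The dealii::Vector conversions gather a distributed Tpetra vector onto every
process by importing it into a locally replicated map. For reference, the
same pattern in plain Tpetra looks as follows; this is a self-contained
sketch with invented names, assuming a standard Tpetra installation, not
code from the patch.

#include <Tpetra_Core.hpp>
#include <Tpetra_Import.hpp>
#include <Tpetra_Map.hpp>
#include <Tpetra_Vector.hpp>

int main(int argc, char *argv[])
{
  // Tpetra's RAII guard initializes and finalizes MPI and Kokkos.
  Tpetra::ScopeGuard tpetra_scope(&argc, &argv);
  {
    using map_type    = Tpetra::Map<>;
    using vector_type = Tpetra::Vector<double>;
    using LO          = map_type::local_ordinal_type;
    using GO          = map_type::global_ordinal_type;

    const auto                  comm     = Tpetra::getDefaultComm();
    const Tpetra::global_size_t n_global = 16;

    // Source: contiguous, uniquely distributed over all processes.
    const auto  distributed_map = Teuchos::rcp(new map_type(n_global, 0, comm));
    vector_type distributed(distributed_map);
    distributed.putScalar(1.0);

    // Target: a locally replicated map, i.e., every process owns all rows;
    // this plays the role of complete_index_set(size()) in the patch.
    const auto  replicated_map = Tpetra::createLocalMap<LO, GO>(n_global, comm);
    vector_type localized(replicated_map);

    // The Import object encodes the communication plan; Tpetra::INSERT
    // overwrites the target entries, as in the patch's doImport() call.
    const Tpetra::Import<> importer(distributed_map, replicated_map);
    localized.doImport(distributed, importer, Tpetra::INSERT);
  }
  return 0;
}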
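Finally, the host-side copy in vector.templates.h wraps the destination
buffer in an unmanaged Kokkos::View and lets Kokkos::deep_copy perform the
element transfer on a host execution space instance. Stripped of the
Trilinos types, the idiom is the following; a minimal sketch with invented
names, not code from the patch.

#include <Kokkos_Core.hpp>

#include <vector>

int main()
{
  Kokkos::initialize();
  {
    const std::size_t n = 8;

    // Source data, standing in for one column of the Tpetra local view.
    Kokkos::View<double *, Kokkos::HostSpace> source("source", n);
    for (std::size_t i = 0; i < n; ++i)
      source(i) = 2.0 * i;

    // Destination buffer owned by someone else (the dealii::Vector's
    // 'values' array in the patch); wrap it without taking ownership.
    std::vector<double>                       destination(n);
    Kokkos::View<double *, Kokkos::HostSpace> destination_view(
      destination.data(), n);

    // Copy on an execution-space instance, then fence: the same
    // deep_copy + fence idiom the patch uses.
    Kokkos::DefaultHostExecutionSpace exec;
    Kokkos::deep_copy(exec, destination_view, source);
    exec.fence();
  }
  Kokkos::finalize();
  return 0;
}

The unmanaged view neither allocates nor frees memory, and the fence is
required because deep_copy on an execution-space instance is asynchronous.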