https://gitweb.dealii.org/ - dealii.git/commitdiff
Declare conversion assignments for Tpetra vectors. 16663/head
author Sebastian Kinnewig <sebastian@kinnewig.org>
Thu, 15 Feb 2024 15:51:43 +0000 (16:51 +0100)
committer Sebastian Kinnewig <kinnewig@ifam.uni-hannover.de>
Thu, 22 Feb 2024 13:06:06 +0000 (14:06 +0100)
include/deal.II/lac/trilinos_tpetra_vector.h
include/deal.II/lac/trilinos_tpetra_vector.templates.h
include/deal.II/lac/vector.h
include/deal.II/lac/vector.templates.h
source/lac/trilinos_tpetra_vector.cc
source/lac/vector.cc

diff --git a/include/deal.II/lac/trilinos_tpetra_vector.h b/include/deal.II/lac/trilinos_tpetra_vector.h
index 0b9a2d3c165e6574537e19c063fcc64b107678e6..337305feeabe9461140704f05cd0292f5d865fae 100644 (file)
@@ -27,6 +27,7 @@
 
 #  include <deal.II/lac/read_vector.h>
 #  include <deal.II/lac/trilinos_tpetra_communication_pattern.h>
+#  include <deal.II/lac/vector.h>
 #  include <deal.II/lac/vector_operation.h>
 #  include <deal.II/lac/vector_type_traits.h>
 
@@ -401,6 +402,15 @@ namespace LinearAlgebra
       Vector &
       operator=(const Vector &V);
 
+      /**
+       * Copy function. This function takes a Vector and copies all the
+       * elements. The Vector will have the same parallel distribution as @p
+       * V.
+       */
+      template <typename OtherNumber>
+      Vector &
+      operator=(const dealii::Vector<OtherNumber> &V);
+
       /**
        * Sets all elements of the vector to the scalar @p s. This operation is
        * only allowed if @p s is equal to zero.
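For orientation, here is a minimal usage sketch of the assignment declared in this hunk. It assumes a Trilinos build with the double instantiation of Tpetra enabled; the function name, vector size, and values are purely illustrative.

#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/vector.h>

void copy_into_tpetra()
{
  // A small serial vector holding the values to copy.
  dealii::Vector<double> serial(3);
  serial(0) = 1.;
  serial(1) = 2.;
  serial(2) = 3.;

  // The new assignment copies all elements; the Tpetra-backed vector
  // adopts the locally owned layout of `serial`. The number types must
  // match, as enforced by the static_assert in the implementation.
  dealii::LinearAlgebra::TpetraWrappers::Vector<double> tpetra_vector;
  tpetra_vector = serial;
}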
diff --git a/include/deal.II/lac/trilinos_tpetra_vector.templates.h b/include/deal.II/lac/trilinos_tpetra_vector.templates.h
index db6cc93ed54bd812ac3848509a218fcf51db28a8..611454d08d3dbde8cc1c49c2b5d7a5ede11a38f1 100644 (file)
@@ -411,6 +411,30 @@ namespace LinearAlgebra
 
 
 
+    template <typename Number, typename MemorySpace>
+    template <typename OtherNumber>
+    Vector<Number, MemorySpace> &
+    Vector<Number, MemorySpace>::operator=(const dealii::Vector<OtherNumber> &V)
+    {
+      static_assert(
+        std::is_same<Number, OtherNumber>::value,
+        "TpetraWrappers::Vector and dealii::Vector must use the same number type here.");
+
+      vector.reset();
+      nonlocal_vector.reset();
+
+      Teuchos::Array<OtherNumber> vector_data(V.begin(), V.end());
+      vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
+        V.locally_owned_elements().make_tpetra_map_rcp(), vector_data);
+
+      has_ghost  = false;
+      compressed = true;
+
+      return *this;
+    }
+
+
+
     template <typename Number, typename MemorySpace>
     Vector<Number, MemorySpace> &
     Vector<Number, MemorySpace>::operator=(const Number s)
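The body above stages the element values in a Teuchos::Array before handing them to the Tpetra vector constructor. Below is a detached sketch of just that staging step; the std::vector stands in for the dealii::Vector data source, and the function name is made up.

#include <Teuchos_Array.hpp>

#include <vector>

void stage_values()
{
  std::vector<double> source = {1., 2., 3.};

  // Teuchos::Array mirrors std::vector and provides an iterator-range
  // constructor, which is what the operator above uses with
  // V.begin()/V.end(); the Array then converts implicitly to the
  // Teuchos::ArrayView expected by the Tpetra vector constructor.
  Teuchos::Array<double> staged(source.begin(), source.end());
  (void)staged;
}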
diff --git a/include/deal.II/lac/vector.h b/include/deal.II/lac/vector.h
index 92edce02029e294af2ae7ec1a2137ba1cc9893ed..c4e55f061170057770a727bf7be75d974cec87e5 100644 (file)
@@ -58,6 +58,17 @@ namespace TrilinosWrappers
 } // namespace TrilinosWrappers
 #  endif
 
+#  ifdef DEAL_II_TRILINOS_WITH_TPETRA
+namespace LinearAlgebra
+{
+  namespace TpetraWrappers
+  {
+    template <typename Number, typename MemorySpace>
+    class Vector;
+  }
+} // namespace LinearAlgebra
+#  endif
+
 template <typename number>
 class LAPACKFullMatrix;
 
@@ -234,6 +245,26 @@ public:
   explicit Vector(const TrilinosWrappers::MPI::Vector &v);
 #endif
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+  /**
+   * Another copy constructor: copy the values from a Trilinos wrapper vector.
+   * This copy constructor is only available if Trilinos was detected during
+   * configuration time.
+   *
+   * @note Due to the communication model used in MPI, this operation can
+   * only succeed if all processes that have knowledge of @p v
+   * (i.e. those given by <code>v.get_mpi_communicator()</code>) do it at
+   * the same time. This means that unless you use a split MPI communicator
+   * then it is not normally possible for only one or a subset of processes
+   * to obtain a copy of a parallel vector while the other jobs do something
+   * else. In other words, calling this function is a @ref GlossCollectiveOperation "collective operation"
+   * that needs to be executed by all MPI processes that jointly share @p v.
+   */
+  template <typename OtherNumber, typename MemorySpace>
+  explicit Vector(
+    const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v);
+#endif
+
   /**
    * Constructor. Set dimension to @p n and initialize all elements with zero.
    *
@@ -440,6 +471,28 @@ public:
   operator=(const TrilinosWrappers::MPI::Vector &v);
 #endif
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+  /**
+   * Another copy operator: copy the values from a (sequential or parallel,
+   * depending on the underlying Trilinos installation) Trilinos wrapper
+   * vector class. This operator is only available if Trilinos was detected
+   * during configuration time.
+   *
+   * @note Due to the communication model used in MPI, this operation can
+   * only succeed if all processes that have knowledge of @p v
+   * (i.e. those given by <code>v.get_mpi_communicator()</code>) do it at
+   * the same time. This means that unless you use a split MPI communicator
+   * then it is not normally possible for only one or a subset of processes
+   * to obtain a copy of a parallel vector while the other jobs do something
+   * else. In other words, calling this function is a @ref GlossCollectiveOperation "collective operation"
+   * that needs to be executed by all MPI processes that jointly share @p v.
+   */
+  template <typename OtherNumber, typename MemorySpace>
+  Vector<Number> &
+  operator=(
+    const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v);
+#endif
+
   /**
    * Test for equality. This function assumes that the present vector and the
    * one to compare with have the same size already, since comparing vectors
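For the converse direction declared in this hunk, here is a usage sketch that localizes a (possibly MPI-distributed) Tpetra wrapper vector into a plain dealii::Vector. As the notes above stress, this is a collective operation, so every process that shares the Tpetra vector must execute it. The IndexSet argument, the IndexSet/communicator constructor of the wrapper, and the double Tpetra instantiation are assumptions of the sketch.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/vector.h>

void localize_tpetra(const dealii::IndexSet &locally_owned_dofs)
{
  using namespace dealii;

  // A distributed, Tpetra-backed vector.
  LinearAlgebra::TpetraWrappers::Vector<double> distributed(locally_owned_dofs,
                                                            MPI_COMM_WORLD);
  // ... fill `distributed` collectively ...

  // Every rank obtains a complete local copy of all elements,
  // either through the new copy constructor ...
  Vector<double> localized(distributed);

  // ... or through the new assignment operator.
  Vector<double> also_localized;
  also_localized = distributed;
}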
diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h
index 4dbc78bc32dd0818df377a327e674bf04f34fad1..c935cea81e6df2ad6f7605ebf9f401da8161ac0e 100644 (file)
 #  include <deal.II/lac/trilinos_vector.h>
 #endif
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+#  include <deal.II/lac/trilinos_tpetra_vector.h>
+#endif
+
 
 #include <algorithm>
 #include <cmath>
@@ -178,6 +182,60 @@ Vector<Number>::Vector(const TrilinosWrappers::MPI::Vector &v)
 #endif
 
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+template <typename Number>
+template <typename OtherNumber, typename MemorySpace>
+Vector<Number>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v)
+  : values(v.size())
+{
+  static_assert(
+    std::is_same<Number, OtherNumber>::value,
+    "TpetraWrappers::Vector and dealii::Vector must use the same number type here.");
+
+  if (size() != 0)
+    {
+      // Copy the distributed vector to
+      // a local one at all processors
+      // that know about the original vector.
+      typename LinearAlgebra::TpetraWrappers::Vector<OtherNumber,
+                                                     MemorySpace>::VectorType
+        localized_vector(complete_index_set(size()).make_tpetra_map_rcp(),
+                         v.get_mpi_communicator());
+
+      Teuchos::RCP<const typename LinearAlgebra::TpetraWrappers::
+                     Vector<OtherNumber, MemorySpace>::ImportType>
+        importer = Tpetra::createImport(v.trilinos_vector().getMap(),
+                                        localized_vector.getMap());
+
+      localized_vector.doImport(v.trilinos_vector(), *importer, Tpetra::INSERT);
+
+      // get a Kokkos view from the localized_vector
+#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>(
+          Tpetra::Access::ReadOnly);
+#  else
+      localized_vector.template sync<Kokkos::HostSpace>();
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>();
+#  endif
+      auto localized_vector_1d =
+        Kokkos::subview(localized_vector_2d, Kokkos::ALL(), 0);
+      const size_t local_length = localized_vector.getLocalLength();
+
+      Kokkos::DefaultHostExecutionSpace         exec;
+      Kokkos::View<Number *, Kokkos::HostSpace> values_view(values.data(),
+                                                            local_length);
+      Kokkos::deep_copy(exec, values_view, localized_vector_1d);
+      exec.fence();
+    }
+}
+
+#endif
+
+
 template <typename Number>
 inline Vector<Number> &
 Vector<Number>::operator=(const Vector<Number> &v)
@@ -809,6 +867,68 @@ Vector<Number>::operator=(const TrilinosWrappers::MPI::Vector &v)
 
 #endif
 
+
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+template <typename Number>
+template <typename OtherNumber, typename MemorySpace>
+Vector<Number> &
+Vector<Number>::operator=(
+  const LinearAlgebra::TpetraWrappers::Vector<OtherNumber, MemorySpace> &v)
+{
+  static_assert(
+    std::is_same<Number, OtherNumber>::value,
+    "TpetraWrappers::Vector and dealii::Vector must use the same number type here.");
+
+  if (v.size() != size())
+    reinit(v.size(), true);
+
+  if (size() != 0)
+    {
+      // Copy the distributed vector to
+      // a local one at all processors
+      // that know about the original vector.
+      typename LinearAlgebra::TpetraWrappers::Vector<OtherNumber,
+                                                     MemorySpace>::VectorType
+        localized_vector(complete_index_set(size()).make_tpetra_map_rcp(),
+                         v.get_mpi_communicator());
+
+      Teuchos::RCP<const typename LinearAlgebra::TpetraWrappers::
+                     Vector<OtherNumber, MemorySpace>::ImportType>
+        importer = Tpetra::createImport(v.trilinos_vector().getMap(),
+                                        localized_vector.getMap());
+
+      localized_vector.doImport(v.trilinos_vector(), *importer, Tpetra::INSERT);
+
+      // get a Kokkos view from the localized_vector
+#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>(
+          Tpetra::Access::ReadOnly);
+#  else
+      localized_vector.template sync<Kokkos::HostSpace>();
+      auto localized_vector_2d =
+        localized_vector.template getLocalView<Kokkos::HostSpace>();
+#  endif
+      auto localized_vector_1d =
+        Kokkos::subview(localized_vector_2d, Kokkos::ALL(), 0);
+      const size_t local_length = localized_vector.getLocalLength();
+
+      Kokkos::DefaultHostExecutionSpace         exec;
+      Kokkos::View<Number *, Kokkos::HostSpace> values_view(values.data(),
+                                                            local_length);
+      Kokkos::deep_copy(exec, values_view, localized_vector_1d);
+      exec.fence();
+    }
+
+  return *this;
+}
+
+#endif
+
+
+
 template <typename Number>
 template <typename Number2>
 bool
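Both bodies above finish by copying the localized Tpetra data into the dealii::Vector storage through an unmanaged Kokkos host view. Below is a self-contained sketch of that final step, detached from Tpetra; the buffer names are illustrative, and Kokkos is assumed to have been initialized already.

#include <Kokkos_Core.hpp>

#include <vector>

void copy_to_host_buffer()
{
  // Destination storage owned elsewhere (in the code above: the
  // dealii::Vector's values array), wrapped in an unmanaged host view.
  std::vector<double> destination(3, 0.);
  Kokkos::View<double *, Kokkos::HostSpace> dst_view(destination.data(),
                                                     destination.size());

  // Source data on the host (in the code above: the 1-d subview of the
  // localized Tpetra vector's local values).
  Kokkos::View<double *, Kokkos::HostSpace> src_view("src", 3);

  // deep_copy on an execution space instance may run asynchronously,
  // hence the fence before the destination is read.
  Kokkos::DefaultHostExecutionSpace exec;
  Kokkos::deep_copy(exec, dst_view, src_view);
  exec.fence();
}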
diff --git a/source/lac/trilinos_tpetra_vector.cc b/source/lac/trilinos_tpetra_vector.cc
index f858d8d0bfb22d83fa2e1b92d6c93d79863757ed..db70885df00f830cc2461b71acec50310c11a339 100644 (file)
@@ -36,6 +36,8 @@ namespace LinearAlgebra
     static_assert(concepts::is_vector_space_vector<Vector<float>>);
 #    endif
     template class Vector<float>;
+    template Vector<float> &
+    Vector<float>::operator=<float>(const dealii::Vector<float> &);
     namespace internal
     {
       template class VectorReference<float>;
@@ -47,6 +49,8 @@ namespace LinearAlgebra
     static_assert(concepts::is_vector_space_vector<Vector<double>>);
 #    endif
     template class Vector<double>;
+    template Vector<double> &
+    Vector<double>::operator=<double>(const dealii::Vector<double> &);
     namespace internal
     {
       template class VectorReference<double>;
@@ -60,6 +64,9 @@ namespace LinearAlgebra
                   Vector<std::complex<float>>);
 #      endif
     template class Vector<std::complex<float>>;
+    template Vector<std::complex<float>> &
+    Vector<std::complex<float>>::operator=
+      <std::complex<float>>(const dealii::Vector<std::complex<float>> &);
     namespace internal
     {
       template class VectorReference<std::complex<float>>;
@@ -72,6 +79,9 @@ namespace LinearAlgebra
                   Vector<std::complex<double>>);
 #      endif
     template class Vector<std::complex<double>>;
+    template Vector<std::complex<double>> &
+    Vector<std::complex<double>>::operator=
+      <std::complex<double>>(const dealii::Vector<std::complex<double>> &);
     namespace internal
     {
       template class VectorReference<std::complex<double>>;
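The lines added here (and the analogous ones in source/lac/vector.cc further down) are explicit instantiations of the new member templates, so that the definitions kept in the .templates.h files are compiled into the library once for each supported number type. A sketch of the general pattern, using a hypothetical class unrelated to deal.II:

// Class template with a member function template, analogous to
// TpetraWrappers::Vector<Number>::operator=(const dealii::Vector<OtherNumber> &).
template <typename Number>
struct Wrapper
{
  template <typename OtherNumber>
  Wrapper &assign(const OtherNumber &)
  {
    return *this;
  }
};

// Explicit instantiation of the enclosing class template ...
template struct Wrapper<double>;

// ... and, separately, of one specialization of its member template:
// instantiating the class alone does not instantiate member templates.
template Wrapper<double> &Wrapper<double>::assign<double>(const double &);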
diff --git a/source/lac/vector.cc b/source/lac/vector.cc
index 54d97765c140bd2ec83249efd78d0ba80df729c9..3374137809a4f72d68f70f0ad76e373ebe198775 100644 (file)
@@ -83,4 +83,40 @@ Vector<int>::lp_norm(const real_type) const
   return -1;
 }
 
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+#  ifdef HAVE_TPETRA_INST_FLOAT
+template Vector<float>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<float> &);
+template Vector<float> &
+Vector<float>::operator=
+  <float>(const LinearAlgebra::TpetraWrappers::Vector<float> &);
+#  endif
+
+#  ifdef HAVE_TPETRA_INST_DOUBLE
+template Vector<double>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<double> &);
+template Vector<double> &
+Vector<double>::operator=
+  <double>(const LinearAlgebra::TpetraWrappers::Vector<double> &);
+#  endif
+
+#  ifdef DEAL_II_WITH_COMPLEX_VALUES
+#    ifdef HAVE_TPETRA_INST_COMPLEX_FLOAT
+template Vector<std::complex<float>>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<float>> &);
+template Vector<std::complex<float>> &
+Vector<std::complex<float>>::operator=<std::complex<float>>(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<float>> &);
+#    endif
+
+#    ifdef HAVE_TPETRA_INST_COMPLEX_DOUBLE
+template Vector<std::complex<double>>::Vector(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<double>> &);
+template Vector<std::complex<double>> &
+Vector<std::complex<double>>::operator=<std::complex<double>>(
+  const LinearAlgebra::TpetraWrappers::Vector<std::complex<double>> &);
+#    endif
+#  endif
+#endif
+
 DEAL_II_NAMESPACE_CLOSE
