Copy vector
author Peter Munch <peterrmuench@gmail.com>
Sat, 21 Mar 2020 09:21:58 +0000 (10:21 +0100)
committer Peter Munch <peterrmuench@gmail.com>
Sat, 21 Mar 2020 09:21:58 +0000 (10:21 +0100)
include/deal.II/lac/la_sm_vector.h [new file with mode: 0644]
include/deal.II/lac/la_sm_vector.templates.h [new file with mode: 0644]
source/lac/CMakeLists.txt
source/lac/la_sm_vector.cc [new file with mode: 0644]
source/lac/la_sm_vector.inst.in [new file with mode: 0644]

diff --git a/include/deal.II/lac/la_sm_vector.h b/include/deal.II/lac/la_sm_vector.h
new file mode 100644 (file)
index 0000000..eb2c206
--- /dev/null
@@ -0,0 +1,1931 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_la_sm_vector_h
+#define dealii_la_sm_vector_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/memory_space.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/numbers.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/base/thread_management.h>
+
+#include <deal.II/lac/vector_operation.h>
+#include <deal.II/lac/vector_space_vector.h>
+#include <deal.II/lac/vector_type_traits.h>
+
+#include <iomanip>
+#include <memory>
+
+DEAL_II_NAMESPACE_OPEN
+
+// Forward declarations
+#ifndef DOXYGEN
+namespace LinearAlgebra
+{
+  /**
+   * A namespace for parallel implementations of vectors.
+   */
+  namespace SharedMPI
+  {
+    template <typename>
+    class BlockVector;
+  }
+
+  template <typename>
+  class ReadWriteVector;
+} // namespace LinearAlgebra
+
+#  ifdef DEAL_II_WITH_PETSC
+namespace PETScWrappers
+{
+  namespace MPI
+  {
+    class Vector;
+  }
+} // namespace PETScWrappers
+#  endif
+
+#  ifdef DEAL_II_WITH_TRILINOS
+namespace TrilinosWrappers
+{
+  namespace MPI
+  {
+    class Vector;
+  }
+} // namespace TrilinosWrappers
+#  endif
+#endif
+
+namespace LinearAlgebra
+{
+  namespace SharedMPI
+  {
+    /*! @addtogroup Vectors
+     *@{
+     */
+
+    /**
+     * Implementation of a parallel vector class. The design of this class is
+     * similar to the standard ::dealii::Vector class in deal.II, with the
+     * exception that storage is distributed with MPI.
+     *
+     * The vector is designed for the following scheme of parallel
+     * partitioning:
+     * <ul>
+     * <li> The indices held by individual processes (locally owned part) in
+     * the MPI parallelization form a contiguous range
+     * <code>[my_first_index,my_last_index)</code>.
+     * <li> Ghost indices residing on arbitrary positions of other processors
+     * are allowed. It is in general more efficient if ghost indices are
+     * clustered, since they are stored as a set of intervals. The
+     * communication pattern of the ghost indices is determined when calling
+     * the function <code>reinit (locally_owned, ghost_indices,
+     * communicator)</code>, and retained until the partitioning is changed.
+     * This allows for efficient parallel communication of indices. In
+     * particular, it stores the communication pattern, rather than having to
+     * compute it again for every communication. For more information on ghost
+     * vectors, see also the
+     * @ref GlossGhostedVector "glossary entry on vectors with ghost elements".
+     * <li> Besides the usual global access operator() it is also possible to
+     * access vector entries in the local index space with the function @p
+     * local_element(). Locally owned indices are placed first, [0,
+     * local_size()), and then all ghost indices follow after them
+     * contiguously, [local_size(), local_size()+n_ghost_entries()). Both
+     * access modes are illustrated in the sketch after this list.
+     * </ul>
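+     *
+     * The scheme above might translate into code along the following lines
+     * (a minimal sketch; the IndexSet objects @p locally_owned and
+     * @p ghost_set as well as the communicator @p comm are assumed to be
+     * set up elsewhere):
+     * @code
+     * LinearAlgebra::SharedMPI::Vector<double> vec(locally_owned,
+     *                                              ghost_set,
+     *                                              comm);
+     * // global access to the first locally owned entry ...
+     * const double a = vec(*locally_owned.begin());
+     * // ... and the equivalent access in the local index space
+     * const double b = vec.local_element(0);
+     * @endcode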
+     *
+     * Functions related to parallel functionality:
+     * <ul>
+     * <li> The function <code>compress()</code> goes through the data
+     * associated with ghost indices and communicates it to the owner process,
+     * which can then add it to the correct position. This can be used e.g.
+     * after having run an assembly routine involving ghosts that fill this
+     * vector. Note that the @p insert mode of @p compress() does not set the
+     * elements included in ghost entries but simply discards them, assuming
+     * that the owning processor has set them to the desired value already
+     * (See also the
+     * @ref GlossCompress "glossary entry on compress").
+     * <li> The <code>update_ghost_values()</code> function imports the data
+     * from the owning processor to the ghost indices in order to provide read
+     * access to the data associated with ghosts; a usage sketch follows this
+     * list.
+     * <li> It is possible to split the above functions into two phases, where
+     * the first initiates the communication and the second one finishes it.
+     * These functions can be used to overlap communication with computations
+     * in other parts of the code.
+     * <li> Of course, reduction operations (like norms) make use of
+     * collective all-to-all MPI communications.
+     * </ul>
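+     *
+     * As a usage sketch of the functions above, a typical cycle of writing,
+     * compressing, and reading on a vector @p vec of this class might look
+     * as follows:
+     * @code
+     * vec = 0.;                           // write mode, ghost entries zeroed
+     * // ... accumulate contributions into owned and ghost entries ...
+     * vec.compress(VectorOperation::add); // send ghost data to the owners
+     * vec.update_ghost_values();          // make ghost entries readable
+     * // ... read access to ghost entries is now allowed ...
+     * vec.zero_out_ghosts();              // return to write mode
+     * @endcode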
+     *
+     * This vector can take two different states with respect to ghost
+     * elements:
+     * <ul>
+     * <li> After creation and whenever zero_out_ghosts() is called (or
+     * <code>operator= (0.)</code>), the vector only allows writing into
+     * ghost elements but not reading from them.
+     * <li> After a call to update_ghost_values(), the vector does not allow
+     * writing into ghost elements but only reading from them. This is to
+     * avoid undesired ghost data artifacts when calling compress() after
+     * modifying some vector entries. The current status of the ghost entries
+     * (read mode or write mode) can be queried by the method
+     * has_ghost_elements(), which returns <code>true</code> exactly when
+     * ghost elements have been updated and <code>false</code> otherwise,
+     * irrespective of the actual number of ghost entries in the vector layout
+     * (for that information, use n_ghost_entries() instead).
+     * </ul>
+     *
+     * This vector uses the facilities of the class dealii::Vector<Number> for
+     * implementing the operations on the local range of the vector. In
+     * particular, it also inherits thread parallelism that splits most
+     * vector-vector operations into smaller chunks if the program uses
+     * multiple threads. This may or may not be desired when working also with
+     * MPI.
+     *
+     * <h4>Limitations regarding the vector size</h4>
+     *
+     * This vector class is based on two different number types for indexing.
+     * The so-called global index type encodes the overall size of the vector.
+     * Its type is types::global_dof_index. The largest possible value is
+     * <code>2^32-1</code> or approximately 4 billion in case 64 bit integers
+     * are disabled at configuration of deal.II (default case) or
+     * <code>2^64-1</code> or approximately <code>10^19</code> if 64 bit
+     * integers are enabled (see the glossary entry on
+     * @ref GlobalDoFIndex
+     * for further information).
+     *
+     * The second relevant index type is the local index used within one MPI
+     * rank. As opposed to the global index, the implementation assumes 32-bit
+     * unsigned integers unconditionally. In other words, to actually use a
+     * vector with more than four billion entries, you need to use MPI with
+     * more than one rank (which in general is a safe assumption since four
+     * billion entries consume at least 16 GB of memory for floats or 32 GB of
+     * memory for doubles) and enable 64-bit indices. If more than 4 billion
+     * local elements are present, the implementation tries to detect that,
+     * which triggers an exception and aborts the code. Note, however, that
+     * the detection of overflow is tricky and the detection mechanism might
+     * fail in some circumstances. Therefore, it is strongly recommended to
+     * not rely on this class to automatically detect the unsupported case.
+     *
+     * <h4>CUDA support</h4>
+     *
+     * This vector class supports two different memory spaces: Host and CUDA. By
+     * default, the memory space is Host and all the data are allocated on the
+     * CPU. When the memory space is CUDA, all the data is allocated on the GPU.
+     * The operations on the vector are performed on the chosen memory space.
+     * From the host, there are two methods to access the elements of the
+     * Vector when using the CUDA memory space:
+     * <ul>
+     * <li> use get_values():
+     * @code
+     * Vector<double, MemorySpace::CUDA> vector(local_range, comm);
+     * double* vector_dev = vector.get_values();
+     * std::vector<double> vector_host(local_range.n_elements(), 1.);
+     * Utilities::CUDA::copy_to_dev(vector_host, vector_dev);
+     * @endcode
+     * <li> use import():
+     * @code
+     * Vector<double, MemorySpace::CUDA> vector(local_range, comm);
+     * ReadWriteVector<double> rw_vector(local_range);
+     * for (auto & val : rw_vector)
+     *   val = 1.;
+     * vector.import(rw_vector, VectorOperations::insert);
+     * @endcode
+     * </ul>
+     * The import method is a lot safer and will perform an MPI communication if
+     * necessary. Since an MPI communication may be performed, import needs to
+     * be called on all the processors.
+     *
+     * @note By default, all the ranks will try to access device 0. This is
+     * fine if you have one rank per node and one GPU per node. If you
+     * have multiple GPUs on one node, each process needs to access a
+     * different GPU. If each node has the same number of GPUs, this can be
+     * done as follows:
+     * @code
+     * int n_devices = 0;
+     * cudaGetDeviceCount(&n_devices);
+     * int device_id = my_rank % n_devices;
+     * cudaSetDevice(device_id);
+     * @endcode
+     * @see CUDAWrappers
+     *
+     * @author Katharina Kormann, Martin Kronbichler, Bruno Turcksin 2010, 2011,
+     * 2016, 2018
+     */
+    template <typename Number, typename MemorySpace = MemorySpace::Host>
+    class Vector : public ::dealii::LinearAlgebra::VectorSpaceVector<Number>,
+                   public Subscriptor
+    {
+    public:
+      using memory_space    = MemorySpace;
+      using value_type      = Number;
+      using pointer         = value_type *;
+      using const_pointer   = const value_type *;
+      using iterator        = value_type *;
+      using const_iterator  = const value_type *;
+      using reference       = value_type &;
+      using const_reference = const value_type &;
+      using size_type       = types::global_dof_index;
+      using real_type       = typename numbers::NumberTraits<Number>::real_type;
+
+      static_assert(
+        std::is_same<MemorySpace, ::dealii::MemorySpace::Host>::value ||
+          std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value,
+        "MemorySpace should be Host or CUDA");
+
+      /**
+       * @name 1: Basic Object-handling
+       */
+      //@{
+      /**
+       * Empty constructor.
+       */
+      Vector();
+
+      /**
+       * Copy constructor. Uses the parallel partitioning of @p in_vector.
+       * It should be noted that this constructor automatically sets ghost
+       * values to zero. Call @p update_ghost_values() directly following
+       * construction if a ghosted vector is required.
+       */
+      Vector(const Vector<Number, MemorySpace> &in_vector);
+
+      /**
+       * Construct a parallel vector of the given global size without any
+       * actual parallel distribution.
+       */
+      Vector(const size_type size);
+
+      /**
+       * Construct a parallel vector. The local range is specified by @p
+       * local_range (note that this must be a contiguous interval,
+       * multiple intervals are not possible). The IndexSet @p ghost_indices
+       * specifies ghost indices, i.e., indices which one might need to read
+       * data from or accumulate data from. It is allowed that the set of
+       * ghost indices also contains the local range, but it does not need to.
+       *
+       * This function involves global communication, so it should only be
+       * called once for a given layout. Use the constructor with
+       * Vector<Number> argument to create additional vectors with the same
+       * parallel layout.
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      Vector(const IndexSet &local_range,
+             const IndexSet &ghost_indices,
+             const MPI_Comm  communicator);
+
+      /**
+       * Same constructor as above but without any ghost indices.
+       */
+      Vector(const IndexSet &local_range, const MPI_Comm communicator);
+
+      /**
+       * Create the vector based on the parallel partitioning described in @p
+       * partitioner. The input argument is a shared pointer, which stores the
+       * partitioner data only once and can be shared between several vectors
+       * with the same layout.
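+       *
+       * A minimal sketch (assuming the IndexSet objects @p locally_owned and
+       * @p ghost_set and the communicator @p comm are set up elsewhere):
+       * @code
+       * const auto partitioner =
+       *   std::make_shared<const Utilities::MPI::Partitioner>(locally_owned,
+       *                                                       ghost_set,
+       *                                                       comm);
+       * LinearAlgebra::SharedMPI::Vector<double> v1(partitioner);
+       * LinearAlgebra::SharedMPI::Vector<double> v2(partitioner);
+       * @endcode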
+       */
+      Vector(
+        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+      /**
+       * Destructor.
+       */
+      virtual ~Vector() override;
+
+      /**
+       * Set the global size of the vector to @p size without any actual
+       * parallel distribution.
+       */
+      void
+      reinit(const size_type size, const bool omit_zeroing_entries = false);
+
+      /**
+       * Uses the parallel layout of the input vector @p in_vector and
+       * allocates memory for this vector. Recommended initialization function
+       * when several vectors with the same layout should be created.
+       *
+       * If the flag @p omit_zeroing_entries is set to false, the memory will
+       * be initialized with zero, otherwise the memory will be untouched (and
+       * the user must make sure to fill it with reasonable data before using
+       * it).
+       */
+      template <typename Number2>
+      void
+      reinit(const Vector<Number2, MemorySpace> &in_vector,
+             const bool                          omit_zeroing_entries = false);
+
+      /**
+       * Initialize the vector. The local range is specified by @p
+       * local_range (note that this must be a contiguous interval,
+       * multiple intervals are not possible). The IndexSet @p ghost_indices
+       * specifies ghost indices, i.e., indices which one might need to read
+       * data from or accumulate data from. It is allowed that the set of
+       * ghost indices also contains the local range, but it does not need to.
+       *
+       * This function involves global communication, so it should only be
+       * called once for a given layout. Use the @p reinit function with
+       * Vector<Number> argument to create additional vectors with the same
+       * parallel layout.
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      void
+      reinit(const IndexSet &local_range,
+             const IndexSet &ghost_indices,
+             const MPI_Comm  communicator);
+
+      /**
+       * Same as above, but without ghost entries.
+       */
+      void
+      reinit(const IndexSet &local_range, const MPI_Comm communicator);
+
+      /**
+       * Initialize the vector given the parallel partitioning described in
+       * @p partitioner. The input argument is a shared pointer, which stores
+       * the partitioner data only once and can be shared between several
+       * vectors with the same layout.
+       */
+      void
+      reinit(
+        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+      /**
+       * Swap the contents of this vector and the other vector @p v. One could
+       * do this operation with a temporary variable and copying over the data
+       * elements, but this function is significantly more efficient since it
+       * only swaps the pointers to the data of the two vectors and therefore
+       * does not need to allocate temporary storage and move data around.
+       *
+       * This function is analogous to the @p swap function of all C++
+       * standard containers. Also, there is a global function
+       * <tt>swap(u,v)</tt> that simply calls <tt>u.swap(v)</tt>, again in
+       * analogy to standard functions.
+       *
+       * This function is virtual in order to allow for derived classes to
+       * handle memory separately.
+       */
+      void
+      swap(Vector<Number, MemorySpace> &v);
+
+      /**
+       * Assigns the vector to the parallel partitioning of the input vector
+       * @p in_vector, and copies all the data.
+       *
+       * If either the input vector or the calling vector (to the left of the
+       * assignment operator) had ghost elements set before this operation,
+       * the calling vector will have ghost values set. Otherwise, it will be
+       * in write mode. If the input vector does not have any ghost elements
+       * at all, the vector will also update its ghost values in analogy to
+       * the respective setting of the Trilinos and PETSc vectors.
+       */
+      Vector<Number, MemorySpace> &
+      operator=(const Vector<Number, MemorySpace> &in_vector);
+
+      /**
+       * Assigns the vector to the parallel partitioning of the input vector
+       * @p in_vector, and copies all the data.
+       *
+       * If either the input vector or the calling vector (to the left of the
+       * assignment operator) had ghost elements set before this operation,
+       * the calling vector will have ghost values set. Otherwise, it will be
+       * in write mode. If the input vector does not have any ghost elements
+       * at all, the vector will also update its ghost values in analogy to
+       * the respective setting of the Trilinos and PETSc vectors.
+       */
+      template <typename Number2>
+      Vector<Number, MemorySpace> &
+      operator=(const Vector<Number2, MemorySpace> &in_vector);
+
+#ifdef DEAL_II_WITH_PETSC
+      /**
+       * Copy the content of a PETSc vector into the calling vector. This
+       * function assumes that the vectors' layouts have already been
+       * initialized to match.
+       *
+       * This operator is only available if deal.II was configured with PETSc.
+       *
+       * This function is deprecated. Use the interface through
+       * ReadWriteVector instead.
+       */
+      DEAL_II_DEPRECATED
+      Vector<Number, MemorySpace> &
+      operator=(const PETScWrappers::MPI::Vector &petsc_vec);
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+      /**
+       * Copy the content of a Trilinos vector into the calling vector. This
+       * function assumes that the vectors' layouts have already been
+       * initialized to match.
+       *
+       * This operator is only available if deal.II was configured with
+       * Trilinos.
+       *
+       * This function is deprecated. Use the interface through
+       * ReadWriteVector instead.
+       */
+      DEAL_II_DEPRECATED
+      Vector<Number, MemorySpace> &
+      operator=(const TrilinosWrappers::MPI::Vector &trilinos_vec);
+#endif
+      //@}
+
+      /**
+       * @name 2: Parallel data exchange
+       */
+      //@{
+      /**
+       * This function copies the data that has accumulated in the data buffer
+       * for ghost indices to the owning processor. For the meaning of the
+       * argument @p operation, see the entry on
+       * @ref GlossCompress "Compressing distributed vectors and matrices"
+       * in the glossary.
+       *
+       * There are four variants for this function. If called with argument @p
+       * VectorOperation::add adds all the data accumulated in ghost elements
+       * to the respective elements on the owning processor and clears the
+       * ghost array afterwards. If called with argument @p
+       * VectorOperation::insert, a set operation is performed. Since setting
+       * elements in a vector with ghost elements is ambiguous (as one can set
+       * both the element on the ghost site as well as the owning site), this
+       * operation makes the assumption that all data is set correctly on the
+       * owning processor. Upon call of compress(VectorOperation::insert), all
+       * ghost entries are thus simply zeroed out (using zero_out_ghosts()).
+       * In debug mode, a check is performed for whether the data set is
+       * actually consistent between processors, i.e., whenever a non-zero
+       * ghost element is found, it is compared to the value on the owning
+       * processor and an exception is thrown if these elements do not agree.
+       * If called with VectorOperation::min or VectorOperation::max, the
+       * minimum or maximum on all elements across the processors is set.
+       * @note This vector class has a fixed set of ghost entries attached to
+       * the local representation. As a consequence, all ghost entries are
+       * assumed to be valid and will be exchanged unconditionally according
+       * to the given VectorOperation. Make sure to initialize all ghost
+       * entries with the neutral element of the given VectorOperation or
+       * touch all ghost entries. The neutral element is zero for
+       * VectorOperation::add and VectorOperation::insert, `+inf` for
+       * VectorOperation::min, and `-inf` for VectorOperation::max. If all
+       * values are initialized with values below zero and compress is called
+       * with VectorOperation::max two times subsequently, the maximal value
+       * after the second calculation will be zero.
+       */
+      virtual void
+      compress(::dealii::VectorOperation::values operation) override;
+
+      /**
+       * Fills the data field for ghost indices with the values stored in the
+       * respective positions of the owning processor. This function is needed
+       * before reading from ghosts. The function is @p const even though
+       * ghost data is changed. This is needed to allow functions with a @p
+       * const vector to perform the data exchange without creating
+       * temporaries.
+       *
+       * After calling this method, write access to ghost elements of the
+       * vector is forbidden and an exception is thrown. Only read access to
+       * ghost elements is allowed in this state. Note that all subsequent
+       * operations on this vector, like global vector addition, etc., will
+       * also update the ghost values by a call to this method after the
+       * operation. However, global reduction operations like norms or the
+       * inner product will always ignore ghost elements in order to avoid
+       * counting the ghost data more than once. To allow writing to ghost
+       * elements again, call zero_out_ghosts().
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      void
+      update_ghost_values() const;
+
+      /**
+       * Initiates communication for the @p compress() function with
+       * non-blocking communication. This function does not wait for the
+       * transfer to finish, in order to allow for other computations during
+       * the time it takes until all data arrives.
+       *
+       * Before the data is actually exchanged, the function must be followed
+       * by a call to @p compress_finish().
+       *
+       * In case this function is called for more than one vector before @p
+       * compress_finish() is invoked, it is mandatory to specify a unique
+       * communication channel to each such call, in order to avoid several
+       * messages with the same ID that will corrupt this operation. Any
+       * communication channel less than 100 is a valid value (in particular,
+       * the range $[100, 200)$ is reserved for
+       * LinearAlgebra::SharedMPI::BlockVector).
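+       *
+       * A sketch of overlapping the communication with independent work
+       * (assuming @p vec has been filled in an assembly loop):
+       * @code
+       * vec.compress_start(0, VectorOperation::add);
+       * // ... computations that do not touch vec ...
+       * vec.compress_finish(VectorOperation::add);
+       * @endcode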
+       */
+      void
+      compress_start(
+        const unsigned int                communication_channel = 0,
+        ::dealii::VectorOperation::values operation = VectorOperation::add);
+
+      /**
+       * For all requests that have been initiated in compress_start, wait for
+       * the communication to finish. Once it is finished, add or set the data
+       * (depending on the flag operation) to the respective positions in the
+       * owning processor, and clear the contents in the ghost data fields.
+       * The meaning of this argument is the same as in compress().
+       *
+       * This function should be called exactly once per vector after calling
+       * compress_start, otherwise the result is undefined. In particular, it
+       * is not well-defined to call compress_start on the same vector again
+       * before compress_finished has been called. However, there is no
+       * warning to prevent this situation.
+       *
+       * Must follow a call to the @p compress_start function.
+       */
+      void
+      compress_finish(::dealii::VectorOperation::values operation);
+
+      /**
+       * Initiates communication for the @p update_ghost_values() function
+       * with non-blocking communication. This function does not wait for the
+       * transfer to finish, in order to allow for other computations during
+       * the time it takes until all data arrives.
+       *
+       * Before the data is actually exchanged, the function must be followed
+       * by a call to @p update_ghost_values_finish().
+       *
+       * In case this function is called for more than one vector before @p
+       * update_ghost_values_finish() is invoked, it is mandatory to specify a
+       * unique communication channel to each such call, in order to avoid
+       * several messages with the same ID that will corrupt this operation.
+       * Any communication channel less than 100 is a valid value (in
+       * particular, the range $[100, 200)$ is reserved for
+       * LinearAlgebra::SharedMPI::BlockVector).
+       */
+      void
+      update_ghost_values_start(
+        const unsigned int communication_channel = 0) const;
+
+
+      /**
+       * For all requests that have been started in update_ghost_values_start,
+       * wait for the communication to finish.
+       *
+       * Must follow a call to the @p update_ghost_values_start function
+       * before reading data from ghost indices.
+       */
+      void
+      update_ghost_values_finish() const;
+
+      /**
+       * This method zeros the entries on ghost DoFs, but does not touch
+       * locally owned DoFs.
+       *
+       * After calling this method, read access to ghost elements of the
+       * vector is forbidden and an exception is thrown. Only write access to
+       * ghost elements is allowed in this state.
+       */
+      void
+      zero_out_ghosts() const;
+
+      /**
+       * Return whether the vector currently is in a state where ghost values
+       * can be read or not. This is the same functionality as other parallel
+       * vectors have. If this method returns false, this only means that
+       * read-access to ghost elements is prohibited whereas write access is
+       * still possible (to those entries specified as ghosts during
+       * initialization), not that there are no ghost elements at all.
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      bool
+      has_ghost_elements() const;
+
+      /**
+       * This method copies the data in the locally owned range from another
+       * SharedMPI vector @p src into the calling vector. As opposed to
+       * operator= that also includes ghost entries, this operation ignores
+       * the ghost range. The only prerequisite is that the local range on the
+       * calling vector and the given vector @p src are the same on all
+       * processors. It is explicitly allowed that the two vectors have
+       * different ghost elements that might or might not be related to each
+       * other.
+       *
+       * Since no data exchange is performed, make sure that neither @p src
+       * nor the calling vector have pending communications in order to obtain
+       * correct results.
+       */
+      template <typename Number2>
+      void
+      copy_locally_owned_data_from(const Vector<Number2, MemorySpace> &src);
+
+      /**
+       * Import all the elements present in the SharedMPI vector @p src.
+       * VectorOperation::values @p operation is used to decide if the elements
+       * in @p src should be added to the current vector or replace the current
+       * elements. The main purpose of this function is to get data from one
+       * memory space, e.g. CUDA, to the other, e.g. the Host.
+       *
+       * @note The partitioners of the two SharedMPI vectors need to be the
+       * same as no MPI communication is performed.
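+       *
+       * A minimal sketch of moving data from the device to the host
+       * (assuming @p vec_dev and @p vec_host are initialized with the same
+       * partitioner):
+       * @code
+       * vec_host.import(vec_dev, VectorOperation::insert);
+       * @endcode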
+       */
+      template <typename MemorySpace2>
+      void
+      import(const Vector<Number, MemorySpace2> &src,
+             VectorOperation::values             operation);
+
+      //@}
+
+      /**
+       * @name 3: Implementation of VectorSpaceVector
+       */
+      //@{
+
+      /**
+       * Change the dimension to that of the vector V. The elements of V are not
+       * copied.
+       */
+      virtual void
+      reinit(const VectorSpaceVector<Number> &V,
+             const bool omit_zeroing_entries = false) override;
+
+      /**
+       * Multiply the entire vector by a fixed factor.
+       */
+      virtual Vector<Number, MemorySpace> &
+      operator*=(const Number factor) override;
+
+      /**
+       * Divide the entire vector by a fixed factor.
+       */
+      virtual Vector<Number, MemorySpace> &
+      operator/=(const Number factor) override;
+
+      /**
+       * Add the vector @p V to the present one.
+       */
+      virtual Vector<Number, MemorySpace> &
+      operator+=(const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Subtract the vector @p V from the present one.
+       */
+      virtual Vector<Number, MemorySpace> &
+      operator-=(const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Import all the elements present in the vector's IndexSet from the input
+       * vector @p V. VectorOperation::values @p operation is used to decide if
+       * the elements in @p V should be added to the current vector or replace the
+       * current elements. The last parameter can be used if the same
+       * communication pattern is used multiple times. This can be used to
+       * improve performance.
+       *
+       * @note If the MemorySpace is CUDA, the data in the ReadWriteVector will
+       * be moved to the device.
+       */
+      virtual void
+      import(
+        const LinearAlgebra::ReadWriteVector<Number> &  V,
+        VectorOperation::values                         operation,
+        std::shared_ptr<const CommunicationPatternBase> communication_pattern =
+          std::shared_ptr<const CommunicationPatternBase>()) override;
+
+      /**
+       * Return the scalar product of two vectors.
+       */
+      virtual Number
+      operator*(const VectorSpaceVector<Number> &V) const override;
+
+      /**
+       * Add @p a to all components. Note that @p a is a scalar not a vector.
+       */
+      virtual void
+      add(const Number a) override;
+
+      /**
+       * Simple addition of a multiple of a vector, i.e. <tt>*this += a*V</tt>.
+       */
+      virtual void
+      add(const Number a, const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Multiple addition of scaled vectors, i.e. <tt>*this += a*V+b*W</tt>.
+       */
+      virtual void
+      add(const Number                     a,
+          const VectorSpaceVector<Number> &V,
+          const Number                     b,
+          const VectorSpaceVector<Number> &W) override;
+
+      /**
+       * A collective add operation: This function adds a whole set of values
+       * stored in @p values to the vector components specified by @p indices.
+       */
+      virtual void
+      add(const std::vector<size_type> &indices,
+          const std::vector<Number> &   values);
+
+      /**
+       * Scaling and simple addition of a multiple of a vector, i.e. <tt>*this =
+       * s*(*this)+a*V</tt>.
+       */
+      virtual void
+      sadd(const Number                     s,
+           const Number                     a,
+           const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Scale each element of this vector by the corresponding element in the
+       * argument. This function is mostly meant to simulate multiplication (and
+       * immediate re-assignment) by a diagonal scaling matrix.
+       */
+      virtual void
+      scale(const VectorSpaceVector<Number> &scaling_factors) override;
+
+      /**
+       * Assignment <tt>*this = a*V</tt>.
+       */
+      virtual void
+      equ(const Number a, const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Return the l<sub>1</sub> norm of the vector (i.e., the sum of the
+       * absolute values of all entries among all processors).
+       */
+      virtual real_type
+      l1_norm() const override;
+
+      /**
+       * Return the $l_2$ norm of the vector (i.e., the square root of
+       * the sum of the square of all entries among all processors).
+       */
+      virtual real_type
+      l2_norm() const override;
+
+      /**
+       * Return the square of the $l_2$ norm of the vector.
+       */
+      real_type
+      norm_sqr() const;
+
+      /**
+       * Return the maximum norm of the vector (i.e., the maximum absolute value
+       * among all entries and among all processors).
+       */
+      virtual real_type
+      linfty_norm() const override;
+
+      /**
+       * Perform a combined operation of a vector addition and a subsequent
+       * inner product, returning the value of the inner product. In other
+       * words, the result of this function is the same as if the user called
+       * @code
+       * this->add(a, V);
+       * return_value = *this * W;
+       * @endcode
+       *
+       * The reason this function exists is that this operation involves less
+       * memory transfer than calling the two functions separately. This method
+       * only needs to load three vectors, @p this, @p V, @p W, whereas calling
+       * separate methods means to load the calling vector @p this twice. Since
+       * most vector operations are memory transfer limited, this reduces the
+       * time by 25\% (or 50\% if @p W equals @p this).
+       *
+       * For complex-valued vectors, the scalar product in the second step is
+       * implemented as
+       * $\left<v,w\right>=\sum_i v_i \bar{w_i}$.
+       */
+      virtual Number
+      add_and_dot(const Number                     a,
+                  const VectorSpaceVector<Number> &V,
+                  const VectorSpaceVector<Number> &W) override;
+
+      /**
+       * Return the global size of the vector, equal to the sum of the number of
+       * locally owned indices among all processors.
+       */
+      virtual size_type
+      size() const override;
+
+      /**
+       * Return an index set that describes which elements of this vector are
+       * owned by the current processor. As a consequence, if this is a
+       * SharedMPI vector, the index sets returned on different processors
+       * will form disjoint sets that add up to the complete index set.
+       * Obviously, if a vector is created on only one processor, then the
+       * result would satisfy
+       * @code
+       *  vec.locally_owned_elements() == complete_index_set(vec.size())
+       * @endcode
+       */
+      virtual dealii::IndexSet
+      locally_owned_elements() const override;
+
+      /**
+       * Print the vector to the output stream @p out.
+       */
+      virtual void
+      print(std::ostream &     out,
+            const unsigned int precision  = 3,
+            const bool         scientific = true,
+            const bool         across     = true) const override;
+
+      /**
+       * Return the memory consumption of this class in bytes.
+       */
+      virtual std::size_t
+      memory_consumption() const override;
+      //@}
+
+      /**
+       * @name 4: Other vector operations not included in VectorSpaceVector
+       */
+      //@{
+
+      /**
+       * Sets all elements of the vector to the scalar @p s. If the scalar is
+       * zero, also ghost elements are set to zero, otherwise they remain
+       * unchanged.
+       */
+      virtual Vector<Number, MemorySpace> &
+      operator=(const Number s) override;
+
+      /**
+       * This is a collective add operation that adds a whole set of values
+       * stored in @p values to the vector components specified by @p indices.
+       */
+      template <typename OtherNumber>
+      void
+      add(const std::vector<size_type> &       indices,
+          const ::dealii::Vector<OtherNumber> &values);
+
+      /**
+       * Take an address where n_elements are stored contiguously and add them
+       * into the vector.
+       */
+      template <typename OtherNumber>
+      void
+      add(const size_type    n_elements,
+          const size_type *  indices,
+          const OtherNumber *values);
+
+      /**
+       * Scaling and simple vector addition, i.e.  <tt>*this =
+       * s*(*this)+V</tt>.
+       */
+      void
+      sadd(const Number s, const Vector<Number, MemorySpace> &V);
+
+      /**
+       * Scaling and multiple addition.
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      void
+      sadd(const Number                       s,
+           const Number                       a,
+           const Vector<Number, MemorySpace> &V,
+           const Number                       b,
+           const Vector<Number, MemorySpace> &W);
+
+      /**
+       * Assignment <tt>*this = a*u + b*v</tt>.
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      void
+      equ(const Number                       a,
+          const Vector<Number, MemorySpace> &u,
+          const Number                       b,
+          const Vector<Number, MemorySpace> &v);
+
+      //@}
+
+
+      /**
+       * @name 5: Entry access and local data representation
+       */
+      //@{
+
+      /**
+       * Return the local size of the vector, i.e., the number of indices
+       * owned locally.
+       */
+      size_type
+      local_size() const;
+
+      /**
+       * Return the half-open interval that specifies the locally owned range
+       * of the vector. Note that <code>local_size() == local_range().second -
+       * local_range().first</code>.
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      std::pair<size_type, size_type>
+      local_range() const;
+
+      /**
+       * Return true if the given global index is in the local range of this
+       * processor.
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      bool
+      in_local_range(const size_type global_index) const;
+
+      /**
+       * Return the number of ghost elements present on the vector.
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      size_type
+      n_ghost_entries() const;
+
+      /**
+       * Return an index set that describes which elements of this vector are
+       * not owned by the current processor but can be written into or read
+       * from locally (ghost elements).
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      const IndexSet &
+      ghost_elements() const;
+
+      /**
+       * Return whether the given global index is a ghost index on the
+       * present processor. Returns false for indices that are owned locally
+       * and for indices not present at all.
+       *
+       * This function is deprecated.
+       */
+      DEAL_II_DEPRECATED
+      bool
+      is_ghost_entry(const types::global_dof_index global_index) const;
+
+      /**
+       * Make the @p Vector class a bit like the <tt>vector<></tt> class of
+       * the C++ standard library by returning iterators to the start and end
+       * of the <i>locally owned</i> elements of this vector.
+       *
+       * It holds that end() - begin() == local_size().
+       *
+       * @note For the CUDA memory space, the iterator points to memory on the
+       * device.
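+       *
+       * For example, the locally owned range can be traversed like this
+       * (a sketch for the host memory space):
+       * @code
+       * for (auto it = vec.begin(); it != vec.end(); ++it)
+       *   *it *= 2.;
+       * @endcode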
+       */
+      iterator
+      begin();
+
+      /**
+       * Return constant iterator to the start of the locally owned elements
+       * of the vector.
+       *
+       * @note For the CUDA memory space, the iterator points to memory on the
+       * device.
+       */
+      const_iterator
+      begin() const;
+
+      /**
+       * Return an iterator pointing to the element past the end of the array
+       * of locally owned entries.
+       *
+       * @note For the CUDA memory space, the iterator points to memory on the
+       * device.
+       */
+      iterator
+      end();
+
+      /**
+       * Return a constant iterator pointing to the element past the end of
+       * the array of the locally owned entries.
+       *
+       * @note For the CUDA memory space, the iterator points to memory on the
+       * device.
+       */
+      const_iterator
+      end() const;
+
+      /**
+       * Read access to the data in the position corresponding to @p
+       * global_index. The index must be either in the local range of the
+       * vector or be specified as a ghost index at construction.
+       *
+       * Performance: <tt>O(1)</tt> for locally owned elements that represent
+       * a contiguous range and <tt>O(log(n<sub>ranges</sub>))</tt> for ghost
+       * elements (quite fast, but slower than local_element()).
+       */
+      Number
+      operator()(const size_type global_index) const;
+
+      /**
+       * Read and write access to the data in the position corresponding to @p
+       * global_index. The index must be either in the local range of the
+       * vector or be specified as a ghost index at construction.
+       *
+       * Performance: <tt>O(1)</tt> for locally owned elements that represent
+       * a contiguous range and <tt>O(log(n<sub>ranges</sub>))</tt> for ghost
+       * elements (quite fast, but slower than local_element()).
+       */
+      Number &
+      operator()(const size_type global_index);
+
+      /**
+       * Read access to the data in the position corresponding to @p
+       * global_index. The index must be either in the local range of the
+       * vector or be specified as a ghost index at construction.
+       *
+       * This function does the same thing as operator().
+       */
+      Number operator[](const size_type global_index) const;
+      /**
+       * Read and write access to the data in the position corresponding to @p
+       * global_index. The index must be either in the local range of the
+       * vector or be specified as a ghost index at construction.
+       *
+       * This function does the same thing as operator().
+       */
+      Number &operator[](const size_type global_index);
+
+      /**
+       * Read access to the data field specified by @p local_index. Locally
+       * owned indices can be accessed with indices
+       * <code>[0, local_size)</code>, and ghost indices with indices
+       * <code>[local_size, local_size+n_ghost_entries)</code>.
+       *
+       * Performance: Direct array access (fast).
+       */
+      Number
+      local_element(const size_type local_index) const;
+
+      /**
+       * Read and write access to the data field specified by @p local_index.
+       * Locally owned indices can be accessed with indices
+       * <code>[0, local_size)</code>, and ghost indices with indices
+       * <code>[local_size, local_size+n_ghost_entries)</code>.
+       *
+       * Performance: Direct array access (fast).
+       */
+      Number &
+      local_element(const size_type local_index);
+
+      /**
+       * Return the pointer to the underlying raw array.
+       *
+       * @note For the CUDA memory space, the pointer points to memory on the
+       * device.
+       */
+      Number *
+      get_values() const;
+
+      /**
+       * Instead of getting individual elements of a vector via operator(),
+       * this function allows getting a whole set of elements at once. The
+       * indices of the elements to be read are stated in the first argument,
+       * the corresponding values are returned in the second.
+       *
+       * If the current vector is called @p v, then this function is
+       * equivalent to the code
+       * @code
+       *   for (unsigned int i=0; i<indices.size(); ++i)
+       *     values[i] = v[indices[i]];
+       * @endcode
+       *
+       * @pre The sizes of the @p indices and @p values arrays must be identical.
+       *
+       * @note This function is not implemented for CUDA memory space.
+       */
+      template <typename OtherNumber>
+      void
+      extract_subvector_to(const std::vector<size_type> &indices,
+                           std::vector<OtherNumber> &    values) const;
+
+      /**
+       * Instead of getting individual elements of a vector via operator(),
+       * this function allows getting a whole set of elements at once. In
+       * contrast to the previous function, this function obtains the
+       * indices of the elements by dereferencing all elements of the iterator
+       * range provided by the first two arguments, and puts the vector
+       * values into memory locations obtained by dereferencing a range
+       * of iterators starting at the location pointed to by the third
+       * argument.
+       *
+       * If the current vector is called @p v, then this function is
+       * equivalent to the code
+       * @code
+       *   ForwardIterator indices_p = indices_begin;
+       *   OutputIterator  values_p  = values_begin;
+       *   while (indices_p != indices_end)
+       *   {
+       *     *values_p = v[*indices_p];
+       *     ++indices_p;
+       *     ++values_p;
+       *   }
+       * @endcode
+       *
+       * @pre It must be possible to write into as many memory locations
+       *   starting at @p values_begin as there are iterators between
+       *   @p indices_begin and @p indices_end.
+       */
+      template <typename ForwardIterator, typename OutputIterator>
+      void
+      extract_subvector_to(ForwardIterator       indices_begin,
+                           const ForwardIterator indices_end,
+                           OutputIterator        values_begin) const;
+      /**
+       * Return whether the vector contains only elements with value zero.
+       * This is a collective operation. This function is expensive, because
+       * potentially all elements have to be checked.
+       */
+      virtual bool
+      all_zero() const override;
+
+      /**
+       * Compute the mean value of all the entries in the vector.
+       */
+      virtual Number
+      mean_value() const override;
+
+      /**
+       * $l_p$-norm of the vector. The pth root of the sum of the pth powers
+       * of the absolute values of the elements.
+       */
+      real_type
+      lp_norm(const real_type p) const;
+      //@}
+
+      /**
+       * @name 6: Mixed stuff
+       */
+      //@{
+
+      /**
+       * Return a reference to the MPI communicator object in use with this
+       * vector.
+       */
+      const MPI_Comm &
+      get_mpi_communicator() const;
+
+      /**
+       * Return the MPI partitioner that describes the parallel layout of the
+       * vector. This object can be used to initialize another vector with the
+       * respective reinit() call, for additional queries regarding the
+       * parallel communication, or the compatibility of partitioners.
+       */
+      const std::shared_ptr<const Utilities::MPI::Partitioner> &
+      get_partitioner() const;
+
+      /**
+       * Check whether the given partitioner is compatible with the
+       * partitioner used for this vector. Two partitioners are compatible if
+       * they have the same local size and the same ghost indices. They do not
+       * necessarily need to be the same data field of the shared pointer.
+       * This is a local operation only, i.e., if only some processors decide
+       * that the partitioning is not compatible, only these processors will
+       * return @p false, whereas the other processors will return @p true.
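+       *
+       * A sketch of guarding a ghost-ignoring copy with this check
+       * (assuming @p v and @p w are vectors of this class):
+       * @code
+       * Assert(v.partitioners_are_compatible(*w.get_partitioner()),
+       *        ExcMessage("Incompatible parallel layouts."));
+       * v.copy_locally_owned_data_from(w);
+       * @endcode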
+       */
+      bool
+      partitioners_are_compatible(
+        const Utilities::MPI::Partitioner &part) const;
+
+      /**
+       * Check whether the given partitioner is compatible with the
+       * partitioner used for this vector. Two partitioners are compatible if
+       * they have the same local size and the same ghost indices. They do not
+       * necessarily need to be the same data field. As opposed to
+       * partitioners_are_compatible(), this method checks for compatibility
+       * among all processors and the method only returns @p true if the
+       * partitioner is the same on all processors.
+       *
+       * This method performs global communication, so make sure to use it
+       * only in a context where all processors call it the same number of
+       * times.
+       */
+      bool
+      partitioners_are_globally_compatible(
+        const Utilities::MPI::Partitioner &part) const;
+
+      /**
+       * Change the ghost state of this vector to @p ghosted.
+       */
+      void
+      set_ghost_state(const bool ghosted) const;
+
+      //@}
+
+      /**
+       * Attempt to perform an operation between two incompatible vector types.
+       *
+       * @ingroup Exceptions
+       */
+      DeclException0(ExcVectorTypeNotCompatible);
+
+      /**
+       * Attempt to perform an operation not implemented on the device.
+       *
+       * @ingroup Exceptions
+       */
+      DeclException0(ExcNotAllowedForCuda);
+
+      /**
+       * Exception
+       */
+      DeclException3(ExcNonMatchingElements,
+                     Number,
+                     Number,
+                     unsigned int,
+                     << "Called compress(VectorOperation::insert), but"
+                     << " the element received from a remote processor, value "
+                     << std::setprecision(16) << arg1
+                     << ", does not match with the value "
+                     << std::setprecision(16) << arg2
+                     << " on the owner processor " << arg3);
+
+      /**
+       * Exception
+       */
+      DeclException4(ExcAccessToNonLocalElement,
+                     size_type,
+                     size_type,
+                     size_type,
+                     size_type,
+                     << "You tried to access element " << arg1
+                     << " of a SharedMPI vector, but this element is not "
+                     << "stored on the current processor. Note: The range of "
+                     << "locally owned elements is " << arg2 << " to " << arg3
+                     << ", and there are " << arg4 << " ghost elements "
+                     << "that this vector can access.");
+
+    private:
+      /**
+       * Simple addition of a multiple of a vector, i.e. <tt>*this += a*V</tt>
+       * without MPI communication.
+       */
+      void
+      add_local(const Number a, const VectorSpaceVector<Number> &V);
+
+      /**
+       * Scaling and simple addition of a multiple of a vector, i.e. <tt>*this =
+       * s*(*this)+a*V</tt> without MPI communication.
+       */
+      void
+      sadd_local(const Number                     s,
+                 const Number                     a,
+                 const VectorSpaceVector<Number> &V);
+
+      /**
+       * Local part of the inner product of two vectors.
+       */
+      template <typename Number2>
+      Number
+      inner_product_local(const Vector<Number2, MemorySpace> &V) const;
+
+      /**
+       * Local part of norm_sqr().
+       */
+      real_type
+      norm_sqr_local() const;
+
+      /**
+       * Local part of mean_value().
+       */
+      Number
+      mean_value_local() const;
+
+      /**
+       * Local part of l1_norm().
+       */
+      real_type
+      l1_norm_local() const;
+
+      /**
+       * Local part of lp_norm().
+       */
+      real_type
+      lp_norm_local(const real_type p) const;
+
+      /**
+       * Local part of linfty_norm().
+       */
+      real_type
+      linfty_norm_local() const;
+
+      /**
+       * Local part of the addition followed by an inner product of two
+       * vectors. The same applies for complex-valued vectors as for
+       * the add_and_dot() function.
+       */
+      Number
+      add_and_dot_local(const Number                       a,
+                        const Vector<Number, MemorySpace> &V,
+                        const Vector<Number, MemorySpace> &W);
+
+      /**
+       * Shared pointer to store the parallel partitioning information. This
+       * information can be shared between several vectors that have the same
+       * partitioning.
+       */
+      std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+      /**
+       * The size that is currently allocated in the val array.
+       */
+      size_type allocated_size;
+
+      /**
+       * Underlying data structure storing the local elements of this vector.
+       */
+      mutable ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> data;
+
+      /**
+       * For parallel loops with TBB, this member variable stores the affinity
+       * information of loops.
+       */
+      mutable std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
+        thread_loop_partitioner;
+
+      /**
+       * Temporary storage that holds the data that is sent to this processor
+       * in @p compress() or sent from this processor in
+       * @p update_ghost_values.
+       */
+      mutable ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace>
+        import_data;
+
+      /**
+       * Stores whether the vector currently allows for reading ghost elements
+       * or not. Note that this is to ensure consistent ghost data and does
+       * not indicate whether the vector actually can store ghost elements. In
+       * particular, when assembling a vector we do not allow reading
+       * elements, only writing them.
+       */
+      mutable bool vector_is_ghosted;
+
+#ifdef DEAL_II_WITH_MPI
+      /**
+       * A vector that collects all requests from @p compress() operations.
+       * This class uses persistent MPI communicators, i.e., the communication
+       * channels are stored during successive calls to a given function. This
+       * reduces the overhead involved with setting up the MPI machinery, but
+       * it does not remove the need for a receive operation to be posted
+       * before the data can actually be sent.
+       */
+      std::vector<MPI_Request> compress_requests;
+
+      /**
+       * A vector that collects all requests from @p update_ghost_values()
+       * operations. This class uses persistent MPI communicators.
+       */
+      mutable std::vector<MPI_Request> update_ghost_values_requests;
+#endif
+
+      /**
+       * A lock that makes sure that the @p compress and @p
+       * update_ghost_values functions give reasonable results also when used
+       * with several threads.
+       */
+      mutable std::mutex mutex;
+
+      /**
+       * A helper function that clears the compress_requests and
+       * update_ghost_values_requests field. Used in reinit functions.
+       */
+      void
+      clear_mpi_requests();
+
+      /**
+       * A helper function that is used to resize the val array.
+       */
+      void
+      resize_val(const size_type new_allocated_size);
+
+      // Make all other vector types friends.
+      template <typename Number2, typename MemorySpace2>
+      friend class Vector;
+
+      // Make BlockVector type friends.
+      template <typename Number2>
+      friend class BlockVector;
+    };
+    /*@}*/
+
+
+    /*-------------------- Inline functions ---------------------------------*/
+
+#ifndef DOXYGEN
+
+    namespace internal
+    {
+      template <typename Number, typename MemorySpace>
+      struct Policy
+      {
+        static inline typename Vector<Number, MemorySpace>::iterator
+        begin(::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
+        {
+          return nullptr;
+        }
+
+        static inline typename Vector<Number, MemorySpace>::const_iterator
+        begin(
+          const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
+        {
+          return nullptr;
+        }
+
+        static inline Number *
+        get_values(
+          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
+        {
+          return nullptr;
+        }
+      };
+
+
+
+      template <typename Number>
+      struct Policy<Number, ::dealii::MemorySpace::Host>
+      {
+        static inline
+          typename Vector<Number, ::dealii::MemorySpace::Host>::iterator
+          begin(::dealii::MemorySpace::
+                  MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
+        {
+          return data.values.get();
+        }
+
+        static inline
+          typename Vector<Number, ::dealii::MemorySpace::Host>::const_iterator
+          begin(const ::dealii::MemorySpace::
+                  MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
+        {
+          return data.values.get();
+        }
+
+        static inline Number *
+        get_values(::dealii::MemorySpace::
+                     MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
+        {
+          return data.values.get();
+        }
+      };
+
+
+
+      template <typename Number>
+      struct Policy<Number, ::dealii::MemorySpace::CUDA>
+      {
+        static inline
+          typename Vector<Number, ::dealii::MemorySpace::CUDA>::iterator
+          begin(::dealii::MemorySpace::
+                  MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+        {
+          return data.values_dev.get();
+        }
+
+        static inline
+          typename Vector<Number, ::dealii::MemorySpace::CUDA>::const_iterator
+          begin(const ::dealii::MemorySpace::
+                  MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+        {
+          return data.values_dev.get();
+        }
+
+        static inline Number *
+        get_values(::dealii::MemorySpace::
+                     MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+        {
+          return data.values_dev.get();
+        }
+      };
+    } // namespace internal
+
+
+    template <typename Number, typename MemorySpace>
+    inline bool
+    Vector<Number, MemorySpace>::has_ghost_elements() const
+    {
+      return vector_is_ghosted;
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::size_type
+    Vector<Number, MemorySpace>::size() const
+    {
+      return partitioner->size();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::size_type
+    Vector<Number, MemorySpace>::local_size() const
+    {
+      return partitioner->local_size();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline std::pair<typename Vector<Number, MemorySpace>::size_type,
+                     typename Vector<Number, MemorySpace>::size_type>
+    Vector<Number, MemorySpace>::local_range() const
+    {
+      return partitioner->local_range();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline bool
+    Vector<Number, MemorySpace>::in_local_range(
+      const size_type global_index) const
+    {
+      return partitioner->in_local_range(global_index);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline IndexSet
+    Vector<Number, MemorySpace>::locally_owned_elements() const
+    {
+      IndexSet is(size());
+
+      is.add_range(partitioner->local_range().first,
+                   partitioner->local_range().second);
+
+      return is;
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::size_type
+    Vector<Number, MemorySpace>::n_ghost_entries() const
+    {
+      return partitioner->n_ghost_indices();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline const IndexSet &
+    Vector<Number, MemorySpace>::ghost_elements() const
+    {
+      return partitioner->ghost_indices();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline bool
+    Vector<Number, MemorySpace>::is_ghost_entry(
+      const size_type global_index) const
+    {
+      return partitioner->is_ghost_entry(global_index);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::iterator
+    Vector<Number, MemorySpace>::begin()
+    {
+      return internal::Policy<Number, MemorySpace>::begin(data);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::const_iterator
+    Vector<Number, MemorySpace>::begin() const
+    {
+      return internal::Policy<Number, MemorySpace>::begin(data);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::iterator
+    Vector<Number, MemorySpace>::end()
+    {
+      return internal::Policy<Number, MemorySpace>::begin(data) +
+             partitioner->local_size();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline typename Vector<Number, MemorySpace>::const_iterator
+    Vector<Number, MemorySpace>::end() const
+    {
+      return internal::Policy<Number, MemorySpace>::begin(data) +
+             partitioner->local_size();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number
+    Vector<Number, MemorySpace>::operator()(const size_type global_index) const
+    {
+      Assert((std::is_same<MemorySpace, ::dealii::MemorySpace::Host>::value),
+             ExcMessage(
+               "This function is only implemented for the Host memory space"));
+      Assert(
+        partitioner->in_local_range(global_index) ||
+          partitioner->ghost_indices().is_element(global_index),
+        ExcAccessToNonLocalElement(global_index,
+                                   partitioner->local_range().first,
+                                   partitioner->local_range().second,
+                                   partitioner->ghost_indices().n_elements()));
+      // do not allow reading a vector which is not in ghost mode
+      Assert(partitioner->in_local_range(global_index) ||
+               vector_is_ghosted == true,
+             ExcMessage("You tried to read a ghost element of this vector, "
+                        "but it has not imported its ghost values."));
+      return data.values[partitioner->global_to_local(global_index)];
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number &
+    Vector<Number, MemorySpace>::operator()(const size_type global_index)
+    {
+      Assert((std::is_same<MemorySpace, ::dealii::MemorySpace::Host>::value),
+             ExcMessage(
+               "This function is only implemented for the Host memory space"));
+      Assert(
+        partitioner->in_local_range(global_index) ||
+          partitioner->ghost_indices().is_element(global_index),
+        ExcAccessToNonLocalElement(global_index,
+                                   partitioner->local_range().first,
+                                   partitioner->local_range().second,
+                                   partitioner->ghost_indices().n_elements()));
+      // we would like to prevent reading ghosts from a vector that does not
+      // have them imported, but this is not possible because we might be in a
+      // part of the code where the vector has enabled ghosts but is non-const
+      // (then, the compiler picks this method according to the C++ rule book
+      // even if a human would pick the const method when this subsequent use
+      // is just a read)
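+      //
+      // (sketch: for a non-const ghosted vector v and a ghost index gi,
+      // `Number x = v(gi);` binds to this non-const overload even though
+      // the use is a pure read, so the ghost-import check of the const
+      // overload cannot be applied here)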
+      return data.values[partitioner->global_to_local(global_index)];
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number Vector<Number, MemorySpace>::
+                  operator[](const size_type global_index) const
+    {
+      return operator()(global_index);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number &Vector<Number, MemorySpace>::
+                   operator[](const size_type global_index)
+    {
+      return operator()(global_index);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number
+    Vector<Number, MemorySpace>::local_element(
+      const size_type local_index) const
+    {
+      Assert((std::is_same<MemorySpace, ::dealii::MemorySpace::Host>::value),
+             ExcMessage(
+               "This function is only implemented for the Host memory space"));
+      AssertIndexRange(local_index,
+                       partitioner->local_size() +
+                         partitioner->n_ghost_indices());
+      // do not allow reading a vector which is not in ghost mode
+      Assert(local_index < local_size() || vector_is_ghosted == true,
+             ExcMessage("You tried to read a ghost element of this vector, "
+                        "but it has not imported its ghost values."));
+
+      return data.values[local_index];
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number &
+    Vector<Number, MemorySpace>::local_element(const size_type local_index)
+    {
+      Assert((std::is_same<MemorySpace, ::dealii::MemorySpace::Host>::value),
+             ExcMessage(
+               "This function is only implemented for the Host memory space"));
+
+      AssertIndexRange(local_index,
+                       partitioner->local_size() +
+                         partitioner->n_ghost_indices());
+
+      return data.values[local_index];
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline Number *
+    Vector<Number, MemorySpace>::get_values() const
+    {
+      return internal::Policy<Number, MemorySpace>::get_values(data);
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    template <typename OtherNumber>
+    inline void
+    Vector<Number, MemorySpace>::extract_subvector_to(
+      const std::vector<size_type> &indices,
+      std::vector<OtherNumber> &    values) const
+    {
+      for (size_type i = 0; i < indices.size(); ++i)
+        values[i] = operator()(indices[i]);
+    }
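+
+    // Usage sketch (names hypothetical; the output vector must already be
+    // sized to match the index list):
+    //
+    //   std::vector<types::global_dof_index> indices = gather_indices();
+    //   std::vector<double>                  values(indices.size());
+    //   v.extract_subvector_to(indices, values);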
+
+
+
+    template <typename Number, typename MemorySpace>
+    template <typename ForwardIterator, typename OutputIterator>
+    inline void
+    Vector<Number, MemorySpace>::extract_subvector_to(
+      ForwardIterator       indices_begin,
+      const ForwardIterator indices_end,
+      OutputIterator        values_begin) const
+    {
+      while (indices_begin != indices_end)
+        {
+          *values_begin = operator()(*indices_begin);
+          indices_begin++;
+          values_begin++;
+        }
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    template <typename OtherNumber>
+    inline void
+    Vector<Number, MemorySpace>::add(
+      const std::vector<size_type> &       indices,
+      const ::dealii::Vector<OtherNumber> &values)
+    {
+      AssertDimension(indices.size(), values.size());
+      for (size_type i = 0; i < indices.size(); ++i)
+        {
+          Assert(
+            numbers::is_finite(values[i]),
+            ExcMessage(
+              "The given value is not finite but either infinite or Not A Number (NaN)"));
+          this->operator()(indices[i]) += values(i);
+        }
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    template <typename OtherNumber>
+    inline void
+    Vector<Number, MemorySpace>::add(const size_type    n_elements,
+                                     const size_type *  indices,
+                                     const OtherNumber *values)
+    {
+      for (size_type i = 0; i < n_elements; ++i, ++indices, ++values)
+        {
+          Assert(
+            numbers::is_finite(*values),
+            ExcMessage(
+              "The given value is not finite but either infinite or Not A Number (NaN)"));
+          this->operator()(*indices) += *values;
+        }
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline const MPI_Comm &
+    Vector<Number, MemorySpace>::get_mpi_communicator() const
+    {
+      return partitioner->get_mpi_communicator();
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline const std::shared_ptr<const Utilities::MPI::Partitioner> &
+    Vector<Number, MemorySpace>::get_partitioner() const
+    {
+      return partitioner;
+    }
+
+
+
+    template <typename Number, typename MemorySpace>
+    inline void
+    Vector<Number, MemorySpace>::set_ghost_state(const bool ghosted) const
+    {
+      vector_is_ghosted = ghosted;
+    }
+
+#endif
+
+  } // namespace SharedMPI
+} // namespace LinearAlgebra
+
+
+/**
+ * Global function @p swap which overloads the default implementation of the
+ * C++ standard library which uses a temporary object. The function simply
+ * exchanges the data of the two vectors.
+ *
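+ * A minimal usage sketch (initialization omitted):
+ * @code
+ *   LinearAlgebra::SharedMPI::Vector<double> u, v;
+ *   // ... initialize u and v with the same layout ...
+ *   swap(u, v); // exchanges the data of u and v without a copy
+ * @endcode
+ *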
+ * @relatesalso Vector
+ * @author Katharina Kormann, Martin Kronbichler, 2011
+ */
+template <typename Number, typename MemorySpace>
+inline void
+swap(LinearAlgebra::SharedMPI::Vector<Number, MemorySpace> &u,
+     LinearAlgebra::SharedMPI::Vector<Number, MemorySpace> &v)
+{
+  u.swap(v);
+}
+
+
+/**
+ * Declare dealii::LinearAlgebra::SharedMPI::Vector<Number, MemorySpace> as
+ * a SharedMPI (non-serial) vector for the purposes of the is_serial_vector
+ * type trait.
+ *
+ * @author Uwe Koecher, 2017
+ */
+template <typename Number, typename MemorySpace>
+struct is_serial_vector<LinearAlgebra::SharedMPI::Vector<Number, MemorySpace>>
+  : std::false_type
+{};
+
+
+
+namespace internal
+{
+  namespace LinearOperatorImplementation
+  {
+    template <typename>
+    class ReinitHelper;
+
+    /**
+     * A helper class used internally in linear_operator.h. Specialization for
+     * LinearAlgebra::SharedMPI::Vector<Number>.
+     */
+    template <typename Number>
+    class ReinitHelper<LinearAlgebra::SharedMPI::Vector<Number>>
+    {
+    public:
+      template <typename Matrix>
+      static void
+      reinit_range_vector(const Matrix &                            matrix,
+                          LinearAlgebra::SharedMPI::Vector<Number> &v,
+                          bool omit_zeroing_entries)
+      {
+        matrix.initialize_dof_vector(v);
+        if (!omit_zeroing_entries)
+          v = Number();
+      }
+
+      template <typename Matrix>
+      static void
+      reinit_domain_vector(const Matrix &                            matrix,
+                           LinearAlgebra::SharedMPI::Vector<Number> &v,
+                           bool omit_zeroing_entries)
+      {
+        matrix.initialize_dof_vector(v);
+        if (!omit_zeroing_entries)
+          v = Number();
+      }
+    };
+
+  } // namespace LinearOperatorImplementation
+} /* namespace internal */
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/include/deal.II/lac/la_sm_vector.templates.h b/include/deal.II/lac/la_sm_vector.templates.h
new file mode 100644 (file)
index 0000000..f837049
--- /dev/null
@@ -0,0 +1,2151 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_la_sm_vector_templates_h
+#define dealii_la_sm_vector_templates_h
+
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/cuda_size.h>
+#include <deal.II/base/std_cxx14/memory.h>
+
+#include <deal.II/lac/exceptions.h>
+#include <deal.II/lac/la_sm_vector.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector_operations_internal.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace LinearAlgebra
+{
+  namespace SharedMPI
+  {
+    namespace internal
+    {
+      // In the import_from_ghosted_array_finish we might need to calculate the
+      // maximal and minimal value for the given number type, which is not
+      // straightforward for complex numbers. Therefore, comparison of complex
+      // numbers is prohibited and throws an exception.
+      template <typename Number>
+      Number
+      get_min(const Number a, const Number b)
+      {
+        return std::min(a, b);
+      }
+
+      template <typename Number>
+      std::complex<Number>
+      get_min(const std::complex<Number> a, const std::complex<Number>)
+      {
+        AssertThrow(false,
+                    ExcMessage("VectorOperation::min not "
+                               "implemented for complex numbers"));
+        return a;
+      }
+
+      template <typename Number>
+      Number
+      get_max(const Number a, const Number b)
+      {
+        return std::max(a, b);
+      }
+
+      template <typename Number>
+      std::complex<Number>
+      get_max(const std::complex<Number> a, const std::complex<Number>)
+      {
+        AssertThrow(false,
+                    ExcMessage("VectorOperation::max not "
+                               "implemented for complex numbers"));
+        return a;
+      }
+
+
+
+      // Resize the underlying array on the host or on the device
+      template <typename Number, typename MemorySpaceType>
+      struct la_parallel_vector_templates_functions
+      {
+        static_assert(std::is_same<MemorySpaceType, MemorySpace::Host>::value ||
+                        std::is_same<MemorySpaceType, MemorySpace::CUDA>::value,
+                      "MemorySpace should be Host or CUDA");
+
+        static void
+        resize_val(
+          const types::global_dof_index /*new_alloc_size*/,
+          types::global_dof_index & /*allocated_size*/,
+          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
+            & /*data*/)
+        {}
+
+        static void
+        import(
+          const ::dealii::LinearAlgebra::ReadWriteVector<Number> & /*V*/,
+          ::dealii::VectorOperation::values /*operation*/,
+          const std::shared_ptr<const ::dealii::Utilities::MPI::Partitioner> &
+          /*communication_pattern*/,
+          const IndexSet & /*locally_owned_elem*/,
+          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
+            & /*data*/)
+        {}
+
+        template <typename RealType>
+        static void
+        linfty_norm_local(
+          const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
+            & /*data*/,
+          const unsigned int /*size*/,
+          RealType & /*max*/)
+        {}
+      };
+
+      template <typename Number>
+      struct la_parallel_vector_templates_functions<Number,
+                                                    ::dealii::MemorySpace::Host>
+      {
+        using size_type = types::global_dof_index;
+
+        static void
+        resize_val(const types::global_dof_index new_alloc_size,
+                   types::global_dof_index &     allocated_size,
+                   ::dealii::MemorySpace::
+                     MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
+        {
+          if (new_alloc_size > allocated_size)
+            {
+              Assert(((allocated_size > 0 && data.values != nullptr) ||
+                      data.values == nullptr),
+                     ExcInternalError());
+
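+              // 64-byte alignment: presumably chosen to cover a full cache
+              // line and the widest SIMD registers (e.g. AVX-512)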
+              Number *new_val;
+              Utilities::System::posix_memalign(
+                reinterpret_cast<void **>(&new_val),
+                64,
+                sizeof(Number) * new_alloc_size);
+              data.values.reset(new_val);
+
+              allocated_size = new_alloc_size;
+            }
+          else if (new_alloc_size == 0)
+            {
+              data.values.reset();
+              allocated_size = 0;
+            }
+        }
+
+        static void
+        import(
+          const ::dealii::LinearAlgebra::ReadWriteVector<Number> &V,
+          ::dealii::VectorOperation::values                       operation,
+          const std::shared_ptr<const ::dealii::Utilities::MPI::Partitioner>
+            &             communication_pattern,
+          const IndexSet &locally_owned_elem,
+          ::dealii::MemorySpace::MemorySpaceData<Number,
+                                                 ::dealii::MemorySpace::Host>
+            &data)
+        {
+          Assert(
+            (operation == ::dealii::VectorOperation::add) ||
+              (operation == ::dealii::VectorOperation::insert),
+            ExcMessage(
+              "Only VectorOperation::add and VectorOperation::insert are allowed"));
+
+          ::dealii::LinearAlgebra::SharedMPI::
+            Vector<Number, ::dealii::MemorySpace::Host>
+              tmp_vector(communication_pattern);
+
+          // fill entries from ReadWriteVector into the SharedMPI vector,
+          // including ghost entries. this is not really efficient right now
+          // because indices are translated twice, once by nth_index_in_set(i)
+          // and once for operator() of tmp_vector
+          const IndexSet &v_stored = V.get_stored_elements();
+          for (size_type i = 0; i < v_stored.n_elements(); ++i)
+            tmp_vector(v_stored.nth_index_in_set(i)) = V.local_element(i);
+
+          tmp_vector.compress(operation);
+
+          // Copy the local elements of tmp_vector to the right place in val
+          IndexSet tmp_index_set = tmp_vector.locally_owned_elements();
+          if (operation == VectorOperation::add)
+            {
+              for (size_type i = 0; i < tmp_index_set.n_elements(); ++i)
+                {
+                  data.values[locally_owned_elem.index_within_set(
+                    tmp_index_set.nth_index_in_set(i))] +=
+                    tmp_vector.local_element(i);
+                }
+            }
+          else
+            {
+              for (size_type i = 0; i < tmp_index_set.n_elements(); ++i)
+                {
+                  data.values[locally_owned_elem.index_within_set(
+                    tmp_index_set.nth_index_in_set(i))] =
+                    tmp_vector.local_element(i);
+                }
+            }
+        }
+
+        template <typename RealType>
+        static void
+        linfty_norm_local(const ::dealii::MemorySpace::MemorySpaceData<
+                            Number,
+                            ::dealii::MemorySpace::Host> &data,
+                          const unsigned int              size,
+                          RealType &                      max)
+        {
+          for (size_type i = 0; i < size; ++i)
+            max =
+              std::max(numbers::NumberTraits<Number>::abs(data.values[i]), max);
+        }
+      };
+
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+      template <typename Number>
+      struct la_parallel_vector_templates_functions<Number,
+                                                    ::dealii::MemorySpace::CUDA>
+      {
+        using size_type = types::global_dof_index;
+
+        static void
+        resize_val(const types::global_dof_index new_alloc_size,
+                   types::global_dof_index &     allocated_size,
+                   ::dealii::MemorySpace::
+                     MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+        {
+          static_assert(
+            std::is_same<Number, float>::value ||
+              std::is_same<Number, double>::value,
+            "Number should be float or double for CUDA memory space");
+
+          if (new_alloc_size > allocated_size)
+            {
+              Assert(((allocated_size > 0 && data.values_dev != nullptr) ||
+                      data.values_dev == nullptr),
+                     ExcInternalError());
+
+              Number *new_val_dev;
+              Utilities::CUDA::malloc(new_val_dev, new_alloc_size);
+              data.values_dev.reset(new_val_dev);
+
+              allocated_size = new_alloc_size;
+            }
+          else if (new_alloc_size == 0)
+            {
+              data.values_dev.reset();
+              allocated_size = 0;
+            }
+        }
+
+        static void
+        import(const ReadWriteVector<Number> &V,
+               VectorOperation::values        operation,
+               std::shared_ptr<const Utilities::MPI::Partitioner>
+                               communication_pattern,
+               const IndexSet &locally_owned_elem,
+               ::dealii::MemorySpace::
+                 MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+        {
+          Assert(
+            (operation == ::dealii::VectorOperation::add) ||
+              (operation == ::dealii::VectorOperation::insert),
+            ExcMessage(
+              "Only VectorOperation::add and VectorOperation::insert are allowed"));
+
+          ::dealii::LinearAlgebra::SharedMPI::
+            Vector<Number, ::dealii::MemorySpace::CUDA>
+              tmp_vector(communication_pattern);
+
+          // fill entries from ReadWriteVector into the SharedMPI vector,
+          // including ghost entries. this is not really efficient right now
+          // because indices are translated twice, once by nth_index_in_set(i)
+          // and once for operator() of tmp_vector
+          const IndexSet &       v_stored   = V.get_stored_elements();
+          const size_type        n_elements = v_stored.n_elements();
+          std::vector<size_type> indices(n_elements);
+          for (size_type i = 0; i < n_elements; ++i)
+            indices[i] = communication_pattern->global_to_local(
+              v_stored.nth_index_in_set(i));
+          // Move the indices to the device
+          size_type *indices_dev;
+          ::dealii::Utilities::CUDA::malloc(indices_dev, n_elements);
+          ::dealii::Utilities::CUDA::copy_to_dev(indices, indices_dev);
+          // Move the data to the device
+          Number *V_dev;
+          ::dealii::Utilities::CUDA::malloc(V_dev, n_elements);
+          cudaError_t cuda_error_code = cudaMemcpy(V_dev,
+                                                   V.begin(),
+                                                   n_elements * sizeof(Number),
+                                                   cudaMemcpyHostToDevice);
+          AssertCuda(cuda_error_code);
+
+          // Set the values in tmp_vector
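+          // (grid sizing: one block per chunk_size * block_size elements,
+          // rounded up; the kernels are assumed to guard against
+          // out-of-range indices)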
+          const int n_blocks =
+            1 + n_elements / (::dealii::CUDAWrappers::chunk_size *
+                              ::dealii::CUDAWrappers::block_size);
+          ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<Number>
+            <<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+              indices_dev, tmp_vector.begin(), V_dev, n_elements);
+
+          tmp_vector.compress(operation);
+
+          // Copy the local elements of tmp_vector to the right place in val
+          IndexSet        tmp_index_set  = tmp_vector.locally_owned_elements();
+          const size_type tmp_n_elements = tmp_index_set.n_elements();
+          indices.resize(tmp_n_elements);
+          for (size_type i = 0; i < tmp_n_elements; ++i)
+            indices[i] = locally_owned_elem.index_within_set(
+              tmp_index_set.nth_index_in_set(i));
+          ::dealii::Utilities::CUDA::free(indices_dev);
+          ::dealii::Utilities::CUDA::malloc(indices_dev, tmp_n_elements);
+          ::dealii::Utilities::CUDA::copy_to_dev(indices, indices_dev);
+
+          if (operation == VectorOperation::add)
+            ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_permutated<
+              Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+              indices_dev,
+              data.values_dev.get(),
+              tmp_vector.begin(),
+              tmp_n_elements);
+          else
+            ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<
+              Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+              indices_dev,
+              data.values_dev.get(),
+              tmp_vector.begin(),
+              tmp_n_elements);
+
+          ::dealii::Utilities::CUDA::free(indices_dev);
+          ::dealii::Utilities::CUDA::free(V_dev);
+        }
+
+        template <typename RealType>
+        static void
+        linfty_norm_local(const ::dealii::MemorySpace::MemorySpaceData<
+                            Number,
+                            ::dealii::MemorySpace::CUDA> &data,
+                          const unsigned int              size,
+                          RealType &                      result)
+        {
+          static_assert(std::is_same<Number, RealType>::value,
+                        "RealType should be the same type as Number");
+
+          Number *    result_device;
+          cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
+          AssertCuda(error_code);
+          error_code = cudaMemset(result_device, 0, sizeof(Number));
+          AssertCuda(error_code);
+
+          const int n_blocks = 1 + size / (::dealii::CUDAWrappers::chunk_size *
+                                           ::dealii::CUDAWrappers::block_size);
+          ::dealii::LinearAlgebra::CUDAWrappers::kernel::reduction<
+            Number,
+            ::dealii::LinearAlgebra::CUDAWrappers::kernel::LInfty<Number>>
+            <<<dim3(n_blocks, 1), dim3(::dealii::CUDAWrappers::block_size)>>>(
+              result_device, data.values_dev.get(), size);
+
+          // Copy the result back to the host
+          error_code = cudaMemcpy(&result,
+                                  result_device,
+                                  sizeof(Number),
+                                  cudaMemcpyDeviceToHost);
+          AssertCuda(error_code);
+          // Free the memory on the device
+          error_code = cudaFree(result_device);
+          AssertCuda(error_code);
+        }
+      };
+#endif
+    } // namespace internal
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::clear_mpi_requests()
+    {
+#ifdef DEAL_II_WITH_MPI
+      for (size_type j = 0; j < compress_requests.size(); ++j)
+        {
+          const int ierr = MPI_Request_free(&compress_requests[j]);
+          AssertThrowMPI(ierr);
+        }
+      compress_requests.clear();
+      for (size_type j = 0; j < update_ghost_values_requests.size(); ++j)
+        {
+          const int ierr = MPI_Request_free(&update_ghost_values_requests[j]);
+          AssertThrowMPI(ierr);
+        }
+      update_ghost_values_requests.clear();
+#endif
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size)
+    {
+      internal::la_parallel_vector_templates_functions<
+        Number,
+        MemorySpaceType>::resize_val(new_alloc_size, allocated_size, data);
+
+      thread_loop_partitioner =
+        std::make_shared<::dealii::parallel::internal::TBBPartitioner>();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::reinit(const size_type size,
+                                            const bool omit_zeroing_entries)
+    {
+      clear_mpi_requests();
+
+      // check whether we need to reallocate
+      resize_val(size);
+
+      // delete previous content in import data
+      import_data.values.reset();
+      import_data.values_dev.reset();
+
+      // set partitioner to serial version
+      partitioner = std::make_shared<Utilities::MPI::Partitioner>(size);
+
+      // set entries to zero if so requested
+      if (omit_zeroing_entries == false)
+        this->operator=(Number());
+      else
+        zero_out_ghosts();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    template <typename Number2>
+    void
+    Vector<Number, MemorySpaceType>::reinit(
+      const Vector<Number2, MemorySpaceType> &v,
+      const bool                              omit_zeroing_entries)
+    {
+      clear_mpi_requests();
+      Assert(v.partitioner.get() != nullptr, ExcNotInitialized());
+
+      // check whether the partitioners are different (check only if they
+      // are allocated differently, not if the actual data is different)
+      if (partitioner.get() != v.partitioner.get())
+        {
+          partitioner = v.partitioner;
+          const size_type new_allocated_size =
+            partitioner->local_size() + partitioner->n_ghost_indices();
+          resize_val(new_allocated_size);
+        }
+
+      if (omit_zeroing_entries == false)
+        this->operator=(Number());
+      else
+        zero_out_ghosts();
+
+      // do not reallocate import_data directly, but only upon request. It
+      // is only used as temporary storage for compress() and
+      // update_ghost_values, and we might have vectors where we never
+      // call these methods and hence do not need to have the storage.
+      import_data.values.reset();
+      import_data.values_dev.reset();
+
+      thread_loop_partitioner = v.thread_loop_partitioner;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::reinit(
+      const IndexSet &locally_owned_indices,
+      const IndexSet &ghost_indices,
+      const MPI_Comm  communicator)
+    {
+      // set up parallel partitioner with index sets and communicator
+      std::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner(
+        new Utilities::MPI::Partitioner(locally_owned_indices,
+                                        ghost_indices,
+                                        communicator));
+      reinit(new_partitioner);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::reinit(
+      const IndexSet &locally_owned_indices,
+      const MPI_Comm  communicator)
+    {
+      // set up parallel partitioner with index sets and communicator
+      std::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner(
+        new Utilities::MPI::Partitioner(locally_owned_indices, communicator));
+      reinit(new_partitioner);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::reinit(
+      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in)
+    {
+      clear_mpi_requests();
+      partitioner = partitioner_in;
+
+      // set vector size and allocate memory
+      const size_type new_allocated_size =
+        partitioner->local_size() + partitioner->n_ghost_indices();
+      resize_val(new_allocated_size);
+
+      // initialize to zero
+      this->operator=(Number());
+
+
+      // do not reallocate import_data directly, but only upon request. It
+      // is only used as temporary storage for compress() and
+      // update_ghost_values, and we might have vectors where we never
+      // call these methods and hence do not need to have the storage.
+      import_data.values.reset();
+      import_data.values_dev.reset();
+
+      vector_is_ghosted = false;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType>::Vector()
+      : partitioner(new Utilities::MPI::Partitioner())
+      , allocated_size(0)
+    {
+      reinit(0);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType>::Vector(
+      const Vector<Number, MemorySpaceType> &v)
+      : Subscriptor()
+      , allocated_size(0)
+      , vector_is_ghosted(false)
+    {
+      reinit(v, true);
+
+      thread_loop_partitioner = v.thread_loop_partitioner;
+
+      const size_type this_size = local_size();
+      if (this_size > 0)
+        {
+          dealii::internal::VectorOperations::
+            functions<Number, Number, MemorySpaceType>::copy(
+              thread_loop_partitioner, partitioner->local_size(), v.data, data);
+        }
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
+                                            const IndexSet &ghost_indices,
+                                            const MPI_Comm  communicator)
+      : allocated_size(0)
+      , vector_is_ghosted(false)
+    {
+      reinit(local_range, ghost_indices, communicator);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
+                                            const MPI_Comm  communicator)
+      : allocated_size(0)
+      , vector_is_ghosted(false)
+    {
+      reinit(local_range, communicator);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType>::Vector(const size_type size)
+      : allocated_size(0)
+      , vector_is_ghosted(false)
+    {
+      reinit(size, false);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType>::Vector(
+      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
+      : allocated_size(0)
+      , vector_is_ghosted(false)
+    {
+      reinit(partitioner);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    inline Vector<Number, MemorySpaceType>::~Vector()
+    {
+      try
+        {
+          clear_mpi_requests();
+        }
+      catch (...)
+        {}
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    inline Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
+    operator=(const Vector<Number, MemorySpaceType> &c)
+    {
+#ifdef _MSC_VER
+      return this->operator=<Number>(c);
+#else
+      return this->template operator=<Number>(c);
+#endif
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    template <typename Number2>
+    inline Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
+    operator=(const Vector<Number2, MemorySpaceType> &c)
+    {
+      Assert(c.partitioner.get() != nullptr, ExcNotInitialized());
+
+      // we update ghost values whenever one of the input or output vectors
+      // already held ghost values or when we import data from a vector with
+      // the same local range but different ghost layout
+      bool must_update_ghost_values = c.vector_is_ghosted;
+
+      // check whether the two vectors use the same parallel partitioner. if
+      // not, check if all local ranges are the same (that way, we can
+      // exchange data between different parallel layouts). One variant which
+      // is included here and necessary for compatibility with the other
+      // SharedMPI vector classes (Trilinos, PETSc) is the case when vector
+      // c does not have any ghosts (constructed without ghost elements given)
+      // but the current vector does: In that case, we need to exchange data
+      // also when neither of the two vectors had updated its ghost values
+      // before.
+      if (partitioner.get() == nullptr)
+        reinit(c, true);
+      else if (partitioner.get() != c.partitioner.get())
+        {
+          // local ranges are also the same if both partitioners are empty
+          // (even if they happen to define the empty range as [0,0) or [c,c)
+          // for some c!=0 in a different way).
+          int local_ranges_are_identical =
+            (partitioner->local_range() == c.partitioner->local_range() ||
+             (partitioner->local_range().second ==
+                partitioner->local_range().first &&
+              c.partitioner->local_range().second ==
+                c.partitioner->local_range().first));
+          if ((c.partitioner->n_mpi_processes() > 1 &&
+               Utilities::MPI::min(local_ranges_are_identical,
+                                   c.partitioner->get_mpi_communicator()) ==
+                 0) ||
+              !local_ranges_are_identical)
+            reinit(c, true);
+          else
+            must_update_ghost_values |= vector_is_ghosted;
+
+          must_update_ghost_values |=
+            (c.partitioner->ghost_indices_initialized() == false &&
+             partitioner->ghost_indices_initialized() == true);
+        }
+      else
+        must_update_ghost_values |= vector_is_ghosted;
+
+      thread_loop_partitioner = c.thread_loop_partitioner;
+
+      const size_type this_size = partitioner->local_size();
+      if (this_size > 0)
+        {
+          dealii::internal::VectorOperations::
+            functions<Number, Number2, MemorySpaceType>::copy(
+              thread_loop_partitioner, this_size, c.data, data);
+        }
+
+      if (must_update_ghost_values)
+        update_ghost_values();
+      else
+        zero_out_ghosts();
+      return *this;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    template <typename Number2>
+    void
+    Vector<Number, MemorySpaceType>::copy_locally_owned_data_from(
+      const Vector<Number2, MemorySpaceType> &src)
+    {
+      AssertDimension(partitioner->local_size(), src.partitioner->local_size());
+      if (partitioner->local_size() > 0)
+        {
+          dealii::internal::VectorOperations::
+            functions<Number, Number2, MemorySpaceType>::copy(
+              thread_loop_partitioner,
+              partitioner->local_size(),
+              src.data,
+              data);
+        }
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    template <typename MemorySpaceType2>
+    void
+    Vector<Number, MemorySpaceType>::import(
+      const Vector<Number, MemorySpaceType2> &src,
+      VectorOperation::values                 operation)
+    {
+      Assert(src.partitioner.get() != nullptr, ExcNotInitialized());
+      Assert(partitioner->locally_owned_range() ==
+               src.partitioner->locally_owned_range(),
+             ExcMessage("Locally owned indices should be identical."));
+      Assert(partitioner->ghost_indices() == src.partitioner->ghost_indices(),
+             ExcMessage("Ghost indices should be identical."));
+      ::dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::import(
+          thread_loop_partitioner, allocated_size, operation, src.data, data);
+    }
+
+
+
+#ifdef DEAL_II_WITH_PETSC
+
+    namespace petsc_helpers
+    {
+      template <typename PETSC_Number, typename Number>
+      void
+      copy_petsc_vector(const PETSC_Number *petsc_start_ptr,
+                        const PETSC_Number *petsc_end_ptr,
+                        Number *            ptr)
+      {
+        std::copy(petsc_start_ptr, petsc_end_ptr, ptr);
+      }
+
+      template <typename PETSC_Number, typename Number>
+      void
+      copy_petsc_vector(const std::complex<PETSC_Number> *petsc_start_ptr,
+                        const std::complex<PETSC_Number> *petsc_end_ptr,
+                        std::complex<Number> *            ptr)
+      {
+        std::copy(petsc_start_ptr, petsc_end_ptr, ptr);
+      }
+
+      template <typename PETSC_Number, typename Number>
+      void
+      copy_petsc_vector(const std::complex<PETSC_Number> * /*petsc_start_ptr*/,
+                        const std::complex<PETSC_Number> * /*petsc_end_ptr*/,
+                        Number * /*ptr*/)
+      {
+        AssertThrow(false, ExcMessage("Tried to copy complex -> real"));
+      }
+    } // namespace petsc_helpers
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
+    operator=(const PETScWrappers::MPI::Vector &petsc_vec)
+    {
+      // TODO: We would like to use the same compact infrastructure as for the
+      // Trilinos vector below, but the interface through ReadWriteVector does
+      // not support overlapping (ghosted) PETSc vectors, which we need for
+      // backward compatibility.
+
+      Assert(petsc_vec.locally_owned_elements() == locally_owned_elements(),
+             StandardExceptions::ExcInvalidState());
+
+      // get a representation of the vector and copy it
+      PetscScalar *  start_ptr;
+      PetscErrorCode ierr =
+        VecGetArray(static_cast<const Vec &>(petsc_vec), &start_ptr);
+      AssertThrow(ierr == 0, ExcPETScError(ierr));
+
+      const size_type vec_size = local_size();
+      petsc_helpers::copy_petsc_vector(start_ptr,
+                                       start_ptr + vec_size,
+                                       begin());
+
+      // restore the representation of the vector
+      ierr = VecRestoreArray(static_cast<const Vec &>(petsc_vec), &start_ptr);
+      AssertThrow(ierr == 0, ExcPETScError(ierr));
+
+      // spread ghost values between processes?
+      if (vector_is_ghosted || petsc_vec.has_ghost_elements())
+        update_ghost_values();
+
+      // return a reference to this object per normal C++ operator
+      // overloading semantics
+      return *this;
+    }
+
+#endif
+
+
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
+    operator=(const TrilinosWrappers::MPI::Vector &trilinos_vec)
+    {
+#  ifdef DEAL_II_WITH_MPI
+      IndexSet combined_set = partitioner->locally_owned_range();
+      combined_set.add_indices(partitioner->ghost_indices());
+      ReadWriteVector<Number> rw_vector(combined_set);
+      rw_vector.import(trilinos_vec, VectorOperation::insert);
+      import(rw_vector, VectorOperation::insert);
+
+      if (vector_is_ghosted || trilinos_vec.has_ghost_elements())
+        update_ghost_values();
+#  else
+      AssertThrow(false, ExcNotImplemented());
+#  endif
+
+      return *this;
+    }
+
+#endif
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::compress(
+      ::dealii::VectorOperation::values operation)
+    {
+      compress_start(0, operation);
+      compress_finish(operation);
+    }
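+
+    // Note: compress_start() and compress_finish() can also be called
+    // separately to overlap the communication with independent local
+    // work, e.g. (sketch; do_local_work() is a hypothetical helper):
+    //
+    //   v.compress_start(0, VectorOperation::add);
+    //   do_local_work();
+    //   v.compress_finish(VectorOperation::add);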
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::update_ghost_values() const
+    {
+      update_ghost_values_start();
+      update_ghost_values_finish();
+    }
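+
+    // Typical read-access pattern (sketch; ghost entries only become
+    // readable after this call):
+    //
+    //   v.update_ghost_values();
+    //   const double g = v(some_ghost_index); // hypothetical index
+    //   v.zero_out_ghosts(); // leave ghost mode before writing again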
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::zero_out_ghosts() const
+    {
+      if (data.values != nullptr)
+        std::fill_n(data.values.get() + partitioner->local_size(),
+                    partitioner->n_ghost_indices(),
+                    Number());
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+      if (data.values_dev != nullptr)
+        {
+          const cudaError_t cuda_error_code =
+            cudaMemset(data.values_dev.get() + partitioner->local_size(),
+                       0,
+                       partitioner->n_ghost_indices() * sizeof(Number));
+          AssertCuda(cuda_error_code);
+        }
+#endif
+
+      vector_is_ghosted = false;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::compress_start(
+      const unsigned int                communication_channel,
+      ::dealii::VectorOperation::values operation)
+    {
+      AssertIndexRange(communication_channel, 200);
+      Assert(vector_is_ghosted == false,
+             ExcMessage("Cannot call compress() on a ghosted vector"));
+
+#ifdef DEAL_II_WITH_MPI
+      // make this function thread safe
+      std::lock_guard<std::mutex> lock(mutex);
+
+      // allocate import_data in case it is not set up yet
+      if (partitioner->n_import_indices() > 0)
+        {
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+          if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
+            {
+              if (import_data.values_dev == nullptr)
+                import_data.values_dev.reset(
+                  Utilities::CUDA::allocate_device_data<Number>(
+                    partitioner->n_import_indices()));
+            }
+          else
+#  endif
+            {
+#  if !defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+              static_assert(
+                std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
+                "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
+#  endif
+              if (import_data.values == nullptr)
+                {
+                  Number *new_val;
+                  Utilities::System::posix_memalign(
+                    reinterpret_cast<void **>(&new_val),
+                    64,
+                    sizeof(Number) * partitioner->n_import_indices());
+                  import_data.values.reset(new_val);
+                }
+            }
+        }
+
+#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
+        {
+          // Move the data to the host and then move it back to the
+          // device. We use values to store the elements because the function
+          // uses a view of the array and thus we need the data on the host to
+          // outlive the scope of the function.
+          Number *new_val;
+          Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
+                                            64,
+                                            sizeof(Number) * allocated_size);
+
+          data.values.reset(new_val);
+
+          cudaError_t cuda_error_code =
+            cudaMemcpy(data.values.get(),
+                       data.values_dev.get(),
+                       allocated_size * sizeof(Number),
+                       cudaMemcpyDeviceToHost);
+          AssertCuda(cuda_error_code);
+        }
+#  endif
+
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
+        {
+          partitioner->import_from_ghosted_array_start(
+            operation,
+            communication_channel,
+            ArrayView<Number, MemorySpace::CUDA>(
+              data.values_dev.get() + partitioner->local_size(),
+              partitioner->n_ghost_indices()),
+            ArrayView<Number, MemorySpace::CUDA>(
+              import_data.values_dev.get(), partitioner->n_import_indices()),
+            compress_requests);
+        }
+      else
+#  endif
+        {
+          partitioner->import_from_ghosted_array_start(
+            operation,
+            communication_channel,
+            ArrayView<Number, MemorySpace::Host>(
+              data.values.get() + partitioner->local_size(),
+              partitioner->n_ghost_indices()),
+            ArrayView<Number, MemorySpace::Host>(
+              import_data.values.get(), partitioner->n_import_indices()),
+            compress_requests);
+        }
+#else
+      (void)communication_channel;
+      (void)operation;
+#endif
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::compress_finish(
+      ::dealii::VectorOperation::values operation)
+    {
+#ifdef DEAL_II_WITH_MPI
+      vector_is_ghosted = false;
+
+      // in order to zero the ghost part of the vector, we need to call
+      // import_from_ghosted_array_finish() even if
+      // compress_requests.size() == 0
+
+      // make this function thread safe
+      std::lock_guard<std::mutex> lock(mutex);
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        {
+          Assert(partitioner->n_import_indices() == 0 ||
+                   import_data.values_dev != nullptr,
+                 ExcNotInitialized());
+          partitioner
+            ->import_from_ghosted_array_finish<Number, MemorySpace::CUDA>(
+              operation,
+              ArrayView<const Number, MemorySpace::CUDA>(
+                import_data.values_dev.get(), partitioner->n_import_indices()),
+              ArrayView<Number, MemorySpace::CUDA>(data.values_dev.get(),
+                                                   partitioner->local_size()),
+              ArrayView<Number, MemorySpace::CUDA>(
+                data.values_dev.get() + partitioner->local_size(),
+                partitioner->n_ghost_indices()),
+              compress_requests);
+        }
+      else
+#  endif
+        {
+          Assert(partitioner->n_import_indices() == 0 ||
+                   import_data.values != nullptr,
+                 ExcNotInitialized());
+          partitioner
+            ->import_from_ghosted_array_finish<Number, MemorySpace::Host>(
+              operation,
+              ArrayView<const Number, MemorySpace::Host>(
+                import_data.values.get(), partitioner->n_import_indices()),
+              ArrayView<Number, MemorySpace::Host>(data.values.get(),
+                                                   partitioner->local_size()),
+              ArrayView<Number, MemorySpace::Host>(
+                data.values.get() + partitioner->local_size(),
+                partitioner->n_ghost_indices()),
+              compress_requests);
+        }
+
+#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      // The communication is done on the host, so we need to
+      // move the data back to the device.
+      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        {
+          cudaError_t cuda_error_code =
+            cudaMemcpy(data.values_dev.get(),
+                       data.values.get(),
+                       allocated_size * sizeof(Number),
+                       cudaMemcpyHostToDevice);
+          AssertCuda(cuda_error_code);
+
+          data.values.reset();
+        }
+#  endif
+#else
+      (void)operation;
+#endif
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::update_ghost_values_start(
+      const unsigned int communication_channel) const
+    {
+      AssertIndexRange(communication_channel, 200);
+#ifdef DEAL_II_WITH_MPI
+      // nothing to do when we neither have import nor ghost indices.
+      if (partitioner->n_ghost_indices() == 0 &&
+          partitioner->n_import_indices() == 0)
+        return;
+
+      // make this function thread safe
+      std::lock_guard<std::mutex> lock(mutex);
+
+      // allocate import_data in case it is not set up yet
+      if (partitioner->n_import_indices() > 0)
+        {
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+          Assert(
+            (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
+            ExcMessage(
+              "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
+          if (import_data.values_dev == nullptr)
+            import_data.values_dev.reset(
+              Utilities::CUDA::allocate_device_data<Number>(
+                partitioner->n_import_indices()));
+#  else
+#    ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
+          static_assert(
+            std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
+            "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
+#    endif
+          if (import_data.values == nullptr)
+            {
+              Number *new_val;
+              Utilities::System::posix_memalign(
+                reinterpret_cast<void **>(&new_val),
+                64,
+                sizeof(Number) * partitioner->n_import_indices());
+              import_data.values.reset(new_val);
+            }
+#  endif
+        }
+
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      // Move the data from the device to the host so that the MPI
+      // communication can be performed there; update_ghost_values_finish()
+      // moves the result back to the device. We use data.values to store
+      // the elements because the function below takes a view of the array,
+      // so the host copy needs to outlive the scope of this function.
+      Number *new_val;
+      Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
+                                        64,
+                                        sizeof(Number) * allocated_size);
+
+      data.values.reset(new_val);
+
+      cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
+                                               data.values_dev.get(),
+                                               allocated_size * sizeof(Number),
+                                               cudaMemcpyDeviceToHost);
+      AssertCuda(cuda_error_code);
+#  endif
+
+#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
+      partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
+        communication_channel,
+        ArrayView<const Number, MemorySpace::Host>(data.values.get(),
+                                                   partitioner->local_size()),
+        ArrayView<Number, MemorySpace::Host>(import_data.values.get(),
+                                             partitioner->n_import_indices()),
+        ArrayView<Number, MemorySpace::Host>(data.values.get() +
+                                               partitioner->local_size(),
+                                             partitioner->n_ghost_indices()),
+        update_ghost_values_requests);
+#  else
+      partitioner->export_to_ghosted_array_start<Number, MemorySpace::CUDA>(
+        communication_channel,
+        ArrayView<const Number, MemorySpace::CUDA>(data.values_dev.get(),
+                                                   partitioner->local_size()),
+        ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.get(),
+                                             partitioner->n_import_indices()),
+        ArrayView<Number, MemorySpace::CUDA>(data.values_dev.get() +
+                                               partitioner->local_size(),
+                                             partitioner->n_ghost_indices()),
+        update_ghost_values_requests);
+#  endif
+
+#else
+      (void)communication_channel;
+#endif
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::update_ghost_values_finish() const
+    {
+#ifdef DEAL_II_WITH_MPI
+      // wait for both sends and receives to complete, even though only the
+      // receives are strictly necessary; this gives (much) better performance
+      AssertDimension(partitioner->ghost_targets().size() +
+                        partitioner->import_targets().size(),
+                      update_ghost_values_requests.size());
+      if (update_ghost_values_requests.size() > 0)
+        {
+          // make this function thread safe
+          std::lock_guard<std::mutex> lock(mutex);
+
+#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
+          partitioner->export_to_ghosted_array_finish(
+            ArrayView<Number, MemorySpace::Host>(
+              data.values.get() + partitioner->local_size(),
+              partitioner->n_ghost_indices()),
+            update_ghost_values_requests);
+#  else
+          partitioner->export_to_ghosted_array_finish(
+            ArrayView<Number, MemorySpace::CUDA>(
+              data.values_dev.get() + partitioner->local_size(),
+              partitioner->n_ghost_indices()),
+            update_ghost_values_requests);
+#  endif
+        }
+
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      // The communication is done on the host, so we need to
+      // move the data back to the device.
+      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        {
+          cudaError_t cuda_error_code =
+            cudaMemcpy(data.values_dev.get() + partitioner->local_size(),
+                       data.values.get() + partitioner->local_size(),
+                       partitioner->n_ghost_indices() * sizeof(Number),
+                       cudaMemcpyHostToDevice);
+          AssertCuda(cuda_error_code);
+
+          data.values.reset();
+        }
+#  endif
+
+#endif
+      vector_is_ghosted = true;
+    }
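+
+    // A usage sketch (editorial note): splitting the ghost update into
+    // update_ghost_values_start()/update_ghost_values_finish() allows
+    // overlapping the MPI exchange with local work, e.g.
+    //
+    //   src.update_ghost_values_start();
+    //   do_local_work();                  // needs no ghost entries yet
+    //   src.update_ghost_values_finish();
+    //   do_work_on_ghosted_entries();     // ghost entries are now valid
+    //
+    // do_local_work() and do_work_on_ghosted_entries() are hypothetical
+    // user functions.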
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::import(
+      const ReadWriteVector<Number> &                 V,
+      VectorOperation::values                         operation,
+      std::shared_ptr<const CommunicationPatternBase> communication_pattern)
+    {
+      // If no communication pattern is given, create one. Otherwise, use the
+      // given one.
+      std::shared_ptr<const Utilities::MPI::Partitioner> comm_pattern;
+      if (communication_pattern.get() == nullptr)
+        {
+          // Split the IndexSet of V into locally owned elements and ghost
+          // indices, then create the communication pattern.
+          IndexSet locally_owned_elem = locally_owned_elements();
+          IndexSet ghost_indices      = V.get_stored_elements();
+          ghost_indices.subtract_set(locally_owned_elem);
+          comm_pattern = std::make_shared<Utilities::MPI::Partitioner>(
+            locally_owned_elem, ghost_indices, get_mpi_communicator());
+        }
+      else
+        {
+          comm_pattern =
+            std::dynamic_pointer_cast<const Utilities::MPI::Partitioner>(
+              communication_pattern);
+          AssertThrow(comm_pattern != nullptr,
+                      ExcMessage("The communication pattern is not of type "
+                                 "Utilities::MPI::Partitioner."));
+        }
+      Vector<Number, ::dealii::MemorySpace::Host> tmp_vector(comm_pattern);
+
+      data.copy_to(tmp_vector.begin(), local_size());
+
+      // Fill entries from the ReadWriteVector into the SharedMPI vector,
+      // including ghost entries. This is not really efficient right now
+      // because indices are translated twice, once by nth_index_in_set(i)
+      // and once by operator() of tmp_vector.
+      const IndexSet &v_stored     = V.get_stored_elements();
+      const size_type v_n_elements = v_stored.n_elements();
+      switch (operation)
+        {
+          case VectorOperation::insert:
+            {
+              for (size_type i = 0; i < v_n_elements; ++i)
+                tmp_vector(v_stored.nth_index_in_set(i)) = V.local_element(i);
+
+              break;
+            }
+          case VectorOperation::add:
+            {
+              for (size_type i = 0; i < v_n_elements; ++i)
+                tmp_vector(v_stored.nth_index_in_set(i)) += V.local_element(i);
+
+              break;
+            }
+          case VectorOperation::min:
+            {
+              for (size_type i = 0; i < v_n_elements; ++i)
+                tmp_vector(v_stored.nth_index_in_set(i)) =
+                  internal::get_min(tmp_vector(v_stored.nth_index_in_set(i)),
+                                    V.local_element(i));
+
+              break;
+            }
+          case VectorOperation::max:
+            {
+              for (size_type i = 0; i < v_n_elements; ++i)
+                tmp_vector(v_stored.nth_index_in_set(i)) =
+                  internal::get_max(tmp_vector(v_stored.nth_index_in_set(i)),
+                                    V.local_element(i));
+
+              break;
+            }
+          default:
+            {
+              Assert(false, ExcMessage("This operation is not supported."));
+            }
+        }
+      tmp_vector.compress(operation);
+
+      data.copy_from(tmp_vector.begin(), local_size());
+    }
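+
+    // A usage sketch (editorial note, hypothetical names; assumes the
+    // declaration provides a default communication pattern): import()
+    // pulls the locally stored elements of a ReadWriteVector into this
+    // vector, e.g. to insert values at arbitrary global indices:
+    //
+    //   LinearAlgebra::ReadWriteVector<double> rw(stored_indices);
+    //   rw(some_global_index) = 42.;
+    //   vec.import(rw, VectorOperation::insert);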
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::swap(Vector<Number, MemorySpaceType> &v)
+    {
+#ifdef DEAL_II_WITH_MPI
+
+#  ifdef DEBUG
+      if (Utilities::MPI::job_supports_mpi())
+        {
+          // make sure that there are not outstanding requests from updating
+          // ghost values or compress
+          int flag = 1;
+          if (update_ghost_values_requests.size() > 0)
+            {
+              const int ierr = MPI_Testall(update_ghost_values_requests.size(),
+                                           update_ghost_values_requests.data(),
+                                           &flag,
+                                           MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+              Assert(flag == 1,
+                     ExcMessage(
+                       "MPI found unfinished update_ghost_values() requests "
+                       "when calling swap, which is not allowed."));
+            }
+          if (compress_requests.size() > 0)
+            {
+              const int ierr = MPI_Testall(compress_requests.size(),
+                                           compress_requests.data(),
+                                           &flag,
+                                           MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+              Assert(flag == 1,
+                     ExcMessage("MPI found unfinished compress() requests "
+                                "when calling swap, which is not allowed."));
+            }
+        }
+#  endif
+
+      std::swap(compress_requests, v.compress_requests);
+      std::swap(update_ghost_values_requests, v.update_ghost_values_requests);
+#endif
+
+      std::swap(partitioner, v.partitioner);
+      std::swap(thread_loop_partitioner, v.thread_loop_partitioner);
+      std::swap(allocated_size, v.allocated_size);
+      std::swap(data, v.data);
+      std::swap(import_data, v.import_data);
+      std::swap(vector_is_ghosted, v.vector_is_ghosted);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::operator=(const Number s)
+    {
+      const size_type this_size = local_size();
+      if (this_size > 0)
+        {
+          dealii::internal::VectorOperations::
+            functions<Number, Number, MemorySpaceType>::set(
+              thread_loop_partitioner, this_size, s, data);
+        }
+
+      // If we call Vector::operator=(0), we want to zero out all entries
+      // including the ghosts.
+      if (s == Number())
+        zero_out_ghosts();
+
+      return *this;
+    }
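+
+    // Editorial note: assigning zero is the idiomatic way to clear a
+    // vector including its ghost range, e.g.
+    //
+    //   vec = 0.;  // zeroes owned entries and ghost entries alike
+    //
+    // whereas assigning any other scalar only touches the locally owned
+    // range.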
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::reinit(const VectorSpaceVector<Number> &V,
+                                            const bool omit_zeroing_entries)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&V) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &down_V = dynamic_cast<const VectorType &>(V);
+
+      reinit(down_V, omit_zeroing_entries);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
+    operator+=(const VectorSpaceVector<Number> &vv)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      AssertDimension(local_size(), v.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::add_vector(
+          thread_loop_partitioner, partitioner->local_size(), v.data, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+
+      return *this;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
+    operator-=(const VectorSpaceVector<Number> &vv)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      AssertDimension(local_size(), v.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::subtract_vector(
+          thread_loop_partitioner, partitioner->local_size(), v.data, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+
+      return *this;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::add(const Number a)
+    {
+      AssertIsFinite(a);
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::add_factor(
+          thread_loop_partitioner, partitioner->local_size(), a, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::add_local(
+      const Number                     a,
+      const VectorSpaceVector<Number> &vv)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      AssertIsFinite(a);
+      AssertDimension(local_size(), v.local_size());
+
+      // nothing to do if a is zero
+      if (a == Number(0.))
+        return;
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::add_av(
+          thread_loop_partitioner, partitioner->local_size(), a, v.data, data);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::add(const Number                     a,
+                                         const VectorSpaceVector<Number> &vv)
+    {
+      add_local(a, vv);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::add(const Number                     a,
+                                         const VectorSpaceVector<Number> &vv,
+                                         const Number                     b,
+                                         const VectorSpaceVector<Number> &ww)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+      Assert(dynamic_cast<const VectorType *>(&ww) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &w = dynamic_cast<const VectorType &>(ww);
+
+      AssertIsFinite(a);
+      AssertIsFinite(b);
+
+      AssertDimension(local_size(), v.local_size());
+      AssertDimension(local_size(), w.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::add_avpbw(
+          thread_loop_partitioner,
+          partitioner->local_size(),
+          a,
+          b,
+          v.data,
+          w.data,
+          data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::add(const std::vector<size_type> &indices,
+                                         const std::vector<Number> &   values)
+    {
+      for (std::size_t i = 0; i < indices.size(); ++i)
+        {
+          this->operator()(indices[i]) += values[i];
+        }
+    }
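+
+    // A usage sketch (editorial note, hypothetical values): element-wise
+    // accumulation into entries addressed by global index,
+    //
+    //   std::vector<types::global_dof_index> idx = {0, 5};
+    //   std::vector<double>                  val = {1., 2.};
+    //   vec.add(idx, val);  // vec(0) += 1.;  vec(5) += 2.;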
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::sadd(
+      const Number                           x,
+      const Vector<Number, MemorySpaceType> &v)
+    {
+      AssertIsFinite(x);
+      AssertDimension(local_size(), v.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::sadd_xv(
+          thread_loop_partitioner, partitioner->local_size(), x, v.data, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::sadd_local(
+      const Number                     x,
+      const Number                     a,
+      const VectorSpaceVector<Number> &vv)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert((dynamic_cast<const VectorType *>(&vv) != nullptr),
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      AssertIsFinite(x);
+      AssertIsFinite(a);
+      AssertDimension(local_size(), v.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::sadd_xav(
+          thread_loop_partitioner,
+          partitioner->local_size(),
+          x,
+          a,
+          v.data,
+          data);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::sadd(const Number                     x,
+                                          const Number                     a,
+                                          const VectorSpaceVector<Number> &vv)
+    {
+      sadd_local(x, a, vv);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::sadd(
+      const Number                           x,
+      const Number                           a,
+      const Vector<Number, MemorySpaceType> &v,
+      const Number                           b,
+      const Vector<Number, MemorySpaceType> &w)
+    {
+      AssertIsFinite(x);
+      AssertIsFinite(a);
+      AssertIsFinite(b);
+
+      AssertDimension(local_size(), v.local_size());
+      AssertDimension(local_size(), w.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::sadd_xavbw(
+          thread_loop_partitioner,
+          partitioner->local_size(),
+          x,
+          a,
+          b,
+          v.data,
+          w.data,
+          data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::operator*=(const Number factor)
+    {
+      AssertIsFinite(factor);
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::multiply_factor(
+          thread_loop_partitioner, partitioner->local_size(), factor, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+
+      return *this;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::operator/=(const Number factor)
+    {
+      operator*=(static_cast<Number>(1.) / factor);
+      return *this;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::scale(const VectorSpaceVector<Number> &vv)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      AssertDimension(local_size(), v.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::scale(
+          thread_loop_partitioner, local_size(), v.data, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::equ(const Number                     a,
+                                         const VectorSpaceVector<Number> &vv)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      AssertIsFinite(a);
+      AssertDimension(local_size(), v.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::equ_au(
+          thread_loop_partitioner, partitioner->local_size(), a, v.data, data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::equ(
+      const Number                           a,
+      const Vector<Number, MemorySpaceType> &v,
+      const Number                           b,
+      const Vector<Number, MemorySpaceType> &w)
+    {
+      AssertIsFinite(a);
+      AssertIsFinite(b);
+
+      AssertDimension(local_size(), v.local_size());
+      AssertDimension(local_size(), w.local_size());
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::equ_aubv(
+          thread_loop_partitioner,
+          partitioner->local_size(),
+          a,
+          b,
+          v.data,
+          w.data,
+          data);
+
+      if (vector_is_ghosted)
+        update_ghost_values();
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    bool
+    Vector<Number, MemorySpaceType>::all_zero() const
+    {
+      return linfty_norm() == 0.;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    template <typename Number2>
+    Number
+    Vector<Number, MemorySpaceType>::inner_product_local(
+      const Vector<Number2, MemorySpaceType> &v) const
+    {
+      if (PointerComparison::equal(this, &v))
+        return norm_sqr_local();
+
+      AssertDimension(partitioner->local_size(), v.partitioner->local_size());
+
+      return dealii::internal::VectorOperations::
+        functions<Number, Number2, MemorySpaceType>::dot(
+          thread_loop_partitioner, partitioner->local_size(), v.data, data);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Number Vector<Number, MemorySpaceType>::
+           operator*(const VectorSpaceVector<Number> &vv) const
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert((dynamic_cast<const VectorType *>(&vv) != nullptr),
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+
+      Number local_result = inner_product_local(v);
+      if (partitioner->n_mpi_processes() > 1)
+        return Utilities::MPI::sum(local_result,
+                                   partitioner->get_mpi_communicator());
+      else
+        return local_result;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::norm_sqr_local() const
+    {
+      real_type sum;
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::norm_2(
+          thread_loop_partitioner, partitioner->local_size(), sum, data);
+
+      AssertIsFinite(sum);
+
+      return sum;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Number
+    Vector<Number, MemorySpaceType>::mean_value_local() const
+    {
+      Assert(size() != 0, ExcEmptyObject());
+
+      if (partitioner->local_size() == 0)
+        return Number();
+
+      Number sum = ::dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::mean_value(
+          thread_loop_partitioner, partitioner->local_size(), data);
+
+      return sum / real_type(partitioner->local_size());
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Number
+    Vector<Number, MemorySpaceType>::mean_value() const
+    {
+      Number local_result = mean_value_local();
+      if (partitioner->n_mpi_processes() > 1)
+        return Utilities::MPI::sum(local_result * static_cast<real_type>(
+                                                    partitioner->local_size()),
+                                   partitioner->get_mpi_communicator()) /
+               static_cast<real_type>(partitioner->size());
+      else
+        return local_result;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::l1_norm_local() const
+    {
+      real_type sum;
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::norm_1(
+          thread_loop_partitioner, partitioner->local_size(), sum, data);
+
+      return sum;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::l1_norm() const
+    {
+      real_type local_result = l1_norm_local();
+      if (partitioner->n_mpi_processes() > 1)
+        return Utilities::MPI::sum(local_result,
+                                   partitioner->get_mpi_communicator());
+      else
+        return local_result;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::norm_sqr() const
+    {
+      real_type local_result = norm_sqr_local();
+      if (partitioner->n_mpi_processes() > 1)
+        return Utilities::MPI::sum(local_result,
+                                   partitioner->get_mpi_communicator());
+      else
+        return local_result;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::l2_norm() const
+    {
+      return std::sqrt(norm_sqr());
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::lp_norm_local(const real_type p) const
+    {
+      real_type sum = 0.;
+
+      dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::norm_p(
+          thread_loop_partitioner, partitioner->local_size(), sum, p, data);
+
+      return std::pow(sum, 1. / p);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::lp_norm(const real_type p) const
+    {
+      const real_type local_result = lp_norm_local(p);
+      if (partitioner->n_mpi_processes() > 1)
+        return std::pow(
+          Utilities::MPI::sum(std::pow(local_result, p),
+                              partitioner->get_mpi_communicator()),
+          static_cast<real_type>(1.0 / p));
+      else
+        return local_result;
+    }
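+
+    // Editorial note: lp_norm() computes the global norm
+    //   ||v||_p = (sum_i |v_i|^p)^(1/p),
+    // which is why the local result is raised back to the p-th power
+    // before the MPI sum and the p-th root is taken afterwards.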
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::linfty_norm_local() const
+    {
+      real_type max = 0.;
+
+      const size_type local_size = partitioner->local_size();
+      internal::la_parallel_vector_templates_functions<
+        Number,
+        MemorySpaceType>::linfty_norm_local(data, local_size, max);
+
+      return max;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    inline typename Vector<Number, MemorySpaceType>::real_type
+    Vector<Number, MemorySpaceType>::linfty_norm() const
+    {
+      const real_type local_result = linfty_norm_local();
+      if (partitioner->n_mpi_processes() > 1)
+        return Utilities::MPI::max(local_result,
+                                   partitioner->get_mpi_communicator());
+      else
+        return local_result;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Number
+    Vector<Number, MemorySpaceType>::add_and_dot_local(
+      const Number                           a,
+      const Vector<Number, MemorySpaceType> &v,
+      const Vector<Number, MemorySpaceType> &w)
+    {
+      const size_type vec_size = partitioner->local_size();
+      AssertDimension(vec_size, v.local_size());
+      AssertDimension(vec_size, w.local_size());
+
+      Number sum = dealii::internal::VectorOperations::
+        functions<Number, Number, MemorySpaceType>::add_and_dot(
+          thread_loop_partitioner, vec_size, a, v.data, w.data, data);
+
+      AssertIsFinite(sum);
+
+      return sum;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    Number
+    Vector<Number, MemorySpaceType>::add_and_dot(
+      const Number                     a,
+      const VectorSpaceVector<Number> &vv,
+      const VectorSpaceVector<Number> &ww)
+    {
+      // Downcast. Throws an exception if invalid.
+      using VectorType = Vector<Number, MemorySpaceType>;
+      Assert((dynamic_cast<const VectorType *>(&vv) != nullptr),
+             ExcVectorTypeNotCompatible());
+      const VectorType &v = dynamic_cast<const VectorType &>(vv);
+      Assert((dynamic_cast<const VectorType *>(&ww) != nullptr),
+             ExcVectorTypeNotCompatible());
+      const VectorType &w = dynamic_cast<const VectorType &>(ww);
+
+      Number local_result = add_and_dot_local(a, v, w);
+      if (partitioner->n_mpi_processes() > 1)
+        return Utilities::MPI::sum(local_result,
+                                   partitioner->get_mpi_communicator());
+      else
+        return local_result;
+    }
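+
+    // A usage sketch (editorial note): add_and_dot() fuses the update
+    // x += a*v with the inner product (x, w) in a single sweep through
+    // memory. A conjugate-gradient iteration can use it as, e.g.,
+    //
+    //   // r -= alpha*q, and res_norm_sqr = (r, r), in one pass:
+    //   const double res_norm_sqr = r.add_and_dot(-alpha, q, r);
+    //
+    // where r and q are vectors of this type and alpha a step length.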
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    inline bool
+    Vector<Number, MemorySpaceType>::partitioners_are_compatible(
+      const Utilities::MPI::Partitioner &part) const
+    {
+      return partitioner->is_compatible(part);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    inline bool
+    Vector<Number, MemorySpaceType>::partitioners_are_globally_compatible(
+      const Utilities::MPI::Partitioner &part) const
+    {
+      return partitioner->is_globally_compatible(part);
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    std::size_t
+    Vector<Number, MemorySpaceType>::memory_consumption() const
+    {
+      std::size_t memory = sizeof(*this);
+      memory += sizeof(Number) * static_cast<std::size_t>(allocated_size);
+
+      // if the partitioner is shared between several vectors, only count a
+      // fraction of that memory here, since we are not actually using more
+      // memory for it.
+      if (partitioner.use_count() > 0)
+        memory +=
+          partitioner->memory_consumption() / partitioner.use_count() + 1;
+      if (import_data.values != nullptr || import_data.values_dev != nullptr)
+        memory += (static_cast<std::size_t>(partitioner->n_import_indices()) *
+                   sizeof(Number));
+      return memory;
+    }
+
+
+
+    template <typename Number, typename MemorySpaceType>
+    void
+    Vector<Number, MemorySpaceType>::print(std::ostream &     out,
+                                           const unsigned int precision,
+                                           const bool         scientific,
+                                           const bool         across) const
+    {
+      Assert(partitioner.get() != nullptr, ExcInternalError());
+      AssertThrow(out, ExcIO());
+      std::ios::fmtflags old_flags     = out.flags();
+      unsigned int       old_precision = out.precision(precision);
+
+      if (scientific)
+        out.setf(std::ios::scientific, std::ios::floatfield);
+      else
+        out.setf(std::ios::fixed, std::ios::floatfield);
+
+      // To write out all the information in order, use as many barriers as
+      // there are processors and start writing only when it is this
+      // process' turn.
+#ifdef DEAL_II_WITH_MPI
+      if (partitioner->n_mpi_processes() > 1)
+        for (unsigned int i = 0; i < partitioner->this_mpi_process(); ++i)
+          {
+            const int ierr = MPI_Barrier(partitioner->get_mpi_communicator());
+            AssertThrowMPI(ierr);
+          }
+#endif
+
+      std::vector<Number> stored_elements(allocated_size);
+      data.copy_to(stored_elements.data(), allocated_size);
+
+      out << "Process #" << partitioner->this_mpi_process() << std::endl
+          << "Local range: [" << partitioner->local_range().first << ", "
+          << partitioner->local_range().second
+          << "), global size: " << partitioner->size() << std::endl
+          << "Vector data:" << std::endl;
+      if (across)
+        for (size_type i = 0; i < partitioner->local_size(); ++i)
+          out << stored_elements[i] << ' ';
+      else
+        for (size_type i = 0; i < partitioner->local_size(); ++i)
+          out << stored_elements[i] << std::endl;
+      out << std::endl;
+
+      if (vector_is_ghosted)
+        {
+          out << "Ghost entries (global index / value):" << std::endl;
+          if (across)
+            for (size_type i = 0; i < partitioner->n_ghost_indices(); ++i)
+              out << '(' << partitioner->ghost_indices().nth_index_in_set(i)
+                  << '/' << stored_elements[partitioner->local_size() + i]
+                  << ") ";
+          else
+            for (size_type i = 0; i < partitioner->n_ghost_indices(); ++i)
+              out << '(' << partitioner->ghost_indices().nth_index_in_set(i)
+                  << '/' << stored_elements[partitioner->local_size() + i]
+                  << ")" << std::endl;
+          out << std::endl;
+        }
+      out << std::flush;
+
+#ifdef DEAL_II_WITH_MPI
+      if (partitioner->n_mpi_processes() > 1)
+        {
+          int ierr = MPI_Barrier(partitioner->get_mpi_communicator());
+          AssertThrowMPI(ierr);
+
+          for (unsigned int i = partitioner->this_mpi_process() + 1;
+               i < partitioner->n_mpi_processes();
+               ++i)
+            {
+              ierr = MPI_Barrier(partitioner->get_mpi_communicator());
+              AssertThrowMPI(ierr);
+            }
+        }
+#endif
+
+      AssertThrow(out, ExcIO());
+      // reset output format
+      out.flags(old_flags);
+      out.precision(old_precision);
+    }
+
+  } // end of namespace SharedMPI
+} // end of namespace LinearAlgebra
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt
index 7d0862977a4abfab96870ebfdf4bef5181eba1d2..cb43b3e2b746ec4fd585d3109718c403bfb5deaa 100644 (file)
@@ -30,6 +30,7 @@ SET(_unity_include_src
   la_vector.cc
   la_parallel_vector.cc
   la_parallel_block_vector.cc
+  la_sm_vector.cc
   matrix_lib.cc
   matrix_out.cc
   precondition_block.cc
@@ -80,6 +81,7 @@ SET(_inst
   la_vector.inst.in
   la_parallel_vector.inst.in
   la_parallel_block_vector.inst.in
+  la_sm_vector.inst.in
   precondition_block.inst.in
   relaxation_block.inst.in
   read_write_vector.inst.in
diff --git a/source/lac/la_sm_vector.cc b/source/lac/la_sm_vector.cc
new file mode 100644 (file)
index 0000000..28d04e2
--- /dev/null
@@ -0,0 +1,49 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/la_sm_vector.h>
+#include <deal.II/lac/la_sm_vector.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+#include "la_sm_vector.inst"
+
+// Explicitly instantiate a few functions that currently do not fit the
+// scheme above because they have two template arguments that need to differ
+// (the case of equal arguments is covered by the default copy constructor
+// and copy operator, which are declared separately)
+
+namespace LinearAlgebra
+{
+  namespace SharedMPI
+  {
+#define TEMPL_COPY_CONSTRUCTOR(S1, S2)                  \
+  template Vector<S1, ::dealii::MemorySpace::Host>      \
+    &Vector<S1, ::dealii::MemorySpace::Host>::operator= \
+      <S2>(const Vector<S2, ::dealii::MemorySpace::Host> &)
+
+    TEMPL_COPY_CONSTRUCTOR(double, float);
+    TEMPL_COPY_CONSTRUCTOR(float, double);
+#ifdef DEAL_II_WITH_COMPLEX_VALUES
+    TEMPL_COPY_CONSTRUCTOR(std::complex<double>, std::complex<float>);
+    TEMPL_COPY_CONSTRUCTOR(std::complex<float>, std::complex<double>);
+#endif
+
+#undef TEMPL_COPY_CONSTRUCTOR
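+
+    // Editorial note: each TEMPL_COPY_CONSTRUCTOR(S1, S2) line above
+    // expands to an explicit instantiation of the mixed-precision
+    // assignment; for example, TEMPL_COPY_CONSTRUCTOR(double, float)
+    // yields
+    //
+    //   template Vector<double, ::dealii::MemorySpace::Host>
+    //     &Vector<double, ::dealii::MemorySpace::Host>::operator=
+    //       <float>(const Vector<float, ::dealii::MemorySpace::Host> &);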
+  } // namespace SharedMPI
+} // namespace LinearAlgebra
+
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/lac/la_sm_vector.inst.in b/source/lac/la_sm_vector.inst.in
new file mode 100644 (file)
index 0000000..d58cdb2
--- /dev/null
@@ -0,0 +1,73 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (SCALAR : REAL_AND_COMPLEX_SCALARS)
+  {
+    namespace LinearAlgebra
+    \{
+      namespace SharedMPI
+      \{
+        template class Vector<SCALAR, ::dealii::MemorySpace::Host>;
+        template void
+        Vector<SCALAR, ::dealii::MemorySpace::Host>::import<
+          ::dealii::MemorySpace::Host>(
+          const Vector<SCALAR, ::dealii::MemorySpace::Host> &,
+          VectorOperation::values);
+      \}
+    \}
+  }
+
+for (S1 : REAL_AND_COMPLEX_SCALARS; S2 : REAL_SCALARS)
+  {
+    namespace LinearAlgebra
+    \{
+      namespace SharedMPI
+      \{
+        template void
+        Vector<S1, ::dealii::MemorySpace::Host>::reinit<S2>(
+          const Vector<S2, ::dealii::MemorySpace::Host> &,
+          const bool);
+        template S1
+        Vector<S1, ::dealii::MemorySpace::Host>::inner_product_local<S2>(
+          const Vector<S2, ::dealii::MemorySpace::Host> &) const;
+        template void
+        Vector<S1, ::dealii::MemorySpace::Host>::copy_locally_owned_data_from<
+          S2>(const Vector<S2, ::dealii::MemorySpace::Host> &);
+      \}
+    \}
+  }
+
+
+for (S1, S2 : COMPLEX_SCALARS)
+  {
+    namespace LinearAlgebra
+    \{
+      namespace SharedMPI
+      \{
+        template void
+        Vector<S1, ::dealii::MemorySpace::Host>::reinit<S2>(
+          const Vector<S2, ::dealii::MemorySpace::Host> &,
+          const bool);
+        template S1
+        Vector<S1, ::dealii::MemorySpace::Host>::inner_product_local<S2>(
+          const Vector<S2, ::dealii::MemorySpace::Host> &) const;
+        template void
+        Vector<S1, ::dealii::MemorySpace::Host>::copy_locally_owned_data_from<
+          S2>(const Vector<S2, ::dealii::MemorySpace::Host> &);
+      \}
+    \}
+  }
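+
+// Editorial note: this .inst.in file is processed by deal.II's
+// expand_instantiations tool; each for(...) block above stamps out one
+// explicit instantiation per scalar type in the named lists, so the
+// templates defined in la_sm_vector.templates.h are compiled once into
+// the library rather than in every translation unit that uses them.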
