From ab997de9acdb44083e7a399c7379be67cd7e1598 Mon Sep 17 00:00:00 2001 From: Peter Munch Date: Sat, 21 Mar 2020 10:21:58 +0100 Subject: [PATCH] Copy vector --- include/deal.II/lac/la_sm_vector.h | 1931 ++++++++++++++++ include/deal.II/lac/la_sm_vector.templates.h | 2151 ++++++++++++++++++ source/lac/CMakeLists.txt | 2 + source/lac/la_sm_vector.cc | 49 + source/lac/la_sm_vector.inst.in | 73 + 5 files changed, 4206 insertions(+) create mode 100644 include/deal.II/lac/la_sm_vector.h create mode 100644 include/deal.II/lac/la_sm_vector.templates.h create mode 100644 source/lac/la_sm_vector.cc create mode 100644 source/lac/la_sm_vector.inst.in diff --git a/include/deal.II/lac/la_sm_vector.h b/include/deal.II/lac/la_sm_vector.h new file mode 100644 index 0000000000..eb2c206cac --- /dev/null +++ b/include/deal.II/lac/la_sm_vector.h @@ -0,0 +1,1931 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2011 - 2019 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_la_sm_vector_h +#define dealii_la_sm_vector_h + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +DEAL_II_NAMESPACE_OPEN + +// Forward declarations +#ifndef DOXYGEN +namespace LinearAlgebra +{ + /** + * A namespace for parallel implementations of vectors. + */ + namespace SharedMPI + { + template + class BlockVector; + } + + template + class ReadWriteVector; +} // namespace LinearAlgebra + +# ifdef DEAL_II_WITH_PETSC +namespace PETScWrappers +{ + namespace MPI + { + class Vector; + } +} // namespace PETScWrappers +# endif + +# ifdef DEAL_II_WITH_TRILINOS +namespace TrilinosWrappers +{ + namespace MPI + { + class Vector; + } +} // namespace TrilinosWrappers +# endif +#endif + +namespace LinearAlgebra +{ + namespace SharedMPI + { + /*! @addtogroup Vectors + *@{ + */ + + /** + * Implementation of a parallel vector class. The design of this class is + * similar to the standard ::dealii::Vector class in deal.II, with the + * exception that storage is SharedMPI with MPI. + * + * The vector is designed for the following scheme of parallel + * partitioning: + *
+ * <ul>
+ * <li> The indices held by individual processes (locally owned part) in
+ * the MPI parallelization form a contiguous range
+ * [my_first_index,my_last_index).
+ * <li> Ghost indices residing on arbitrary positions of other processors
+ * are allowed. It is in general more efficient if ghost indices are
+ * clustered, since they are stored as a set of intervals. The
+ * communication pattern of the ghost indices is determined when calling
+ * the function reinit (locally_owned, ghost_indices,
+ * communicator), and retained until the partitioning is changed.
+ * This allows for efficient parallel communication of indices. In
+ * particular, it stores the communication pattern, rather than having to
+ * compute it again for every communication. For more information on ghost
+ * vectors, see also the
+ * @ref GlossGhostedVector "glossary entry on vectors with ghost elements".
+ * <li> Besides the usual global access operator() it is also possible to
+ * access vector entries in the local index space with the function @p
+ * local_element(). Locally owned indices are placed first, [0,
+ * local_size()), and then all ghost indices follow after them
+ * contiguously, [local_size(), local_size()+n_ghost_entries()).
+ * </ul>
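+ *
+ * As a rough illustration (the index sets and the bounds used here are
+ * placeholders, not part of the interface documented below), a vector with
+ * this kind of partitioning could be set up as follows:
+ * @code
+ * // every rank owns a contiguous range of the global indices
+ * IndexSet locally_owned(global_size);
+ * locally_owned.add_range(my_first_index, my_last_index);
+ *
+ * // ghost indices may refer to entries owned by any other rank
+ * IndexSet ghost_indices(global_size);
+ * ghost_indices.add_index(some_index_owned_elsewhere);
+ *
+ * Vector<double> v(locally_owned, ghost_indices, MPI_COMM_WORLD);
+ * @endcode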
+ *
+ * Functions related to parallel functionality:
+ * <ul>
+ * <li> The function compress() goes through the data
+ * associated with ghost indices and communicates it to the owner process,
+ * which can then add it to the correct position. This can be used e.g.
+ * after having run an assembly routine involving ghosts that fill this
+ * vector. Note that the @p insert mode of @p compress() does not set the
+ * elements included in ghost entries but simply discards them, assuming
+ * that the owning processor has set them to the desired value already
+ * (See also the
+ * @ref GlossCompress "glossary entry on compress").
+ * <li> The update_ghost_values() function imports the data
+ * from the owning processor to the ghost indices in order to provide read
+ * access to the data associated with ghosts.
+ * <li> It is possible to split the above functions into two phases, where
+ * the first initiates the communication and the second one finishes it.
+ * These functions can be used to overlap communication with computations
+ * in other parts of the code.
+ * <li> Of course, reduction operations (like norms) make use of
+ * collective all-to-all MPI communications.
+ * </ul>
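+ *
+ * Put together, a typical write-then-read cycle looks roughly like the
+ * following sketch (assuming @p v is a ghosted vector as created above;
+ * the indices and the contribution are placeholders):
+ * @code
+ * // write into owned and ghost entries, e.g. during assembly
+ * v = 0.;
+ * v(some_owned_or_ghost_index) += local_contribution;
+ *
+ * // accumulate the ghost contributions on the owning processors
+ * v.compress(VectorOperation::add);
+ *
+ * // afterwards, make the owner data readable through the ghost entries
+ * v.update_ghost_values();
+ * const double value = v(some_ghost_index);
+ * @endcode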
+ *
+ * This vector can take two different states with respect to ghost
+ * elements:
+ * <ul>
+ * <li> After creation and whenever zero_out_ghosts() is called (or
+ * operator= (0.)), the vector only allows writing into ghost
+ * elements, but not reading from them.
+ * <li> After a call to update_ghost_values(), the vector does not allow
+ * writing into ghost elements but only reading from them. This is to
+ * avoid undesired ghost data artifacts when calling compress() after
+ * modifying some vector entries. The current status of the ghost entries
+ * (read mode or write mode) can be queried by the method
+ * has_ghost_elements(), which returns true exactly when
+ * ghost elements have been updated and false otherwise,
+ * irrespective of the actual number of ghost entries in the vector layout
+ * (for that information, use n_ghost_entries() instead).
+ * </ul>
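+ *
+ * In terms of code, the two states can be sketched as follows (again for a
+ * ghosted vector @p v; this is only meant as an illustration):
+ * @code
+ * v.update_ghost_values();   // ghost entries become read-only
+ * // v.has_ghost_elements() now returns true
+ *
+ * v.zero_out_ghosts();       // ghost entries are writable again
+ * // v.has_ghost_elements() now returns false
+ * @endcode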
+ *
+ * This vector uses the facilities of the class dealii::Vector for
+ * implementing the operations on the local range of the vector. In
+ * particular, it also inherits thread parallelism that splits most
+ * vector-vector operations into smaller chunks if the program uses
+ * multiple threads. This may or may not be desired when working also with
+ * MPI.
+ *

Limitations regarding the vector size

+ * + * This vector class is based on two different number types for indexing. + * The so-called global index type encodes the overall size of the vector. + * Its type is types::global_dof_index. The largest possible value is + * 2^32-1 or approximately 4 billion in case 64 bit integers + * are disabled at configuration of deal.II (default case) or + * 2^64-1 or approximately 10^19 if 64 bit + * integers are enabled (see the glossary entry on + * @ref GlobalDoFIndex + * for further information). + * + * The second relevant index type is the local index used within one MPI + * rank. As opposed to the global index, the implementation assumes 32-bit + * unsigned integers unconditionally. In other words, to actually use a + * vector with more than four billion entries, you need to use MPI with + * more than one rank (which in general is a safe assumption since four + * billion entries consume at least 16 GB of memory for floats or 32 GB of + * memory for doubles) and enable 64-bit indices. If more than 4 billion + * local elements are present, the implementation tries to detect that, + * which triggers an exception and aborts the code. Note, however, that + * the detection of overflow is tricky and the detection mechanism might + * fail in some circumstances. Therefore, it is strongly recommended to + * not rely on this class to automatically detect the unsupported case. + * + *

CUDA support

+ *
+ * This vector class supports two different memory spaces: Host and CUDA. By
+ * default, the memory space is Host and all the data is allocated on the
+ * CPU. When the memory space is CUDA, all the data is allocated on the GPU.
+ * The operations on the vector are performed on the chosen memory space.
+ * From the host, there are two methods to access the elements of the Vector
+ * when using the CUDA memory space:
+ * <ul>
+ * <li> use get_values():
+ * @code
+ * Vector<double, MemorySpace::CUDA> vector(local_range, comm);
+ * double* vector_dev = vector.get_values();
+ * std::vector<double> vector_host(local_range.n_elements(), 1.);
+ * Utilities::CUDA::copy_to_dev(vector_host, vector_dev);
+ * @endcode
+ * <li> use import():
+ * @code
+ * Vector<double, MemorySpace::CUDA> vector(local_range, comm);
+ * ReadWriteVector<double> rw_vector(local_range);
+ * for (auto & val : rw_vector)
+ *   val = 1.;
+ * vector.import(rw_vector, VectorOperation::insert);
+ * @endcode
+ * </ul>
+ * The import method is a lot safer and will perform an MPI communication if + * necessary. Since an MPI communication may be performed, import needs to + * be called on all the processors. + * + * @note By default, all the ranks will try to access the device 0. This is + * fine is if you have one rank per node and one gpu per node. If you + * have multiple GPUs on one node, we need each process to access a + * different GPU. If each node has the same number of GPUs, this can be done + * as follows: + * int n_devices = 0; cudaGetDeviceCount(&n_devices); int + * device_id = my_rank % n_devices; + * cudaSetDevice(device_id); + * + * @see CUDAWrappers + * + * @author Katharina Kormann, Martin Kronbichler, Bruno Turcksin 2010, 2011, + * 2016, 2018 + */ + template + class Vector : public ::dealii::LinearAlgebra::VectorSpaceVector, + public Subscriptor + { + public: + using memory_space = MemorySpace; + using value_type = Number; + using pointer = value_type *; + using const_pointer = const value_type *; + using iterator = value_type *; + using const_iterator = const value_type *; + using reference = value_type &; + using const_reference = const value_type &; + using size_type = types::global_dof_index; + using real_type = typename numbers::NumberTraits::real_type; + + static_assert( + std::is_same::value || + std::is_same::value, + "MemorySpace should be Host or CUDA"); + + /** + * @name 1: Basic Object-handling + */ + //@{ + /** + * Empty constructor. + */ + Vector(); + + /** + * Copy constructor. Uses the parallel partitioning of @p in_vector. + * It should be noted that this constructor automatically sets ghost + * values to zero. Call @p update_ghost_values() directly following + * construction if a ghosted vector is required. + */ + Vector(const Vector &in_vector); + + /** + * Construct a parallel vector of the given global size without any + * actual parallel distribution. + */ + Vector(const size_type size); + + /** + * Construct a parallel vector. The local range is specified by @p + * locally_owned_set (note that this must be a contiguous interval, + * multiple intervals are not possible). The IndexSet @p ghost_indices + * specifies ghost indices, i.e., indices which one might need to read + * data from or accumulate data from. It is allowed that the set of + * ghost indices also contains the local range, but it does not need to. + * + * This function involves global communication, so it should only be + * called once for a given layout. Use the constructor with + * Vector argument to create additional vectors with the same + * parallel layout. + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + Vector(const IndexSet &local_range, + const IndexSet &ghost_indices, + const MPI_Comm communicator); + + /** + * Same constructor as above but without any ghost indices. + */ + Vector(const IndexSet &local_range, const MPI_Comm communicator); + + /** + * Create the vector based on the parallel partitioning described in @p + * partitioner. The input argument is a shared pointer, which store the + * partitioner data only once and share it between several vectors with + * the same layout. + */ + Vector( + const std::shared_ptr &partitioner); + + /** + * Destructor. + */ + virtual ~Vector() override; + + /** + * Set the global size of the vector to @p size without any actual + * parallel distribution. 
+ */ + void + reinit(const size_type size, const bool omit_zeroing_entries = false); + + /** + * Uses the parallel layout of the input vector @p in_vector and + * allocates memory for this vector. Recommended initialization function + * when several vectors with the same layout should be created. + * + * If the flag @p omit_zeroing_entries is set to false, the memory will + * be initialized with zero, otherwise the memory will be untouched (and + * the user must make sure to fill it with reasonable data before using + * it). + */ + template + void + reinit(const Vector &in_vector, + const bool omit_zeroing_entries = false); + + /** + * Initialize the vector. The local range is specified by @p + * locally_owned_set (note that this must be a contiguous interval, + * multiple intervals are not possible). The IndexSet @p ghost_indices + * specifies ghost indices, i.e., indices which one might need to read + * data from or accumulate data from. It is allowed that the set of + * ghost indices also contains the local range, but it does not need to. + * + * This function involves global communication, so it should only be + * called once for a given layout. Use the @p reinit function with + * Vector argument to create additional vectors with the same + * parallel layout. + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + void + reinit(const IndexSet &local_range, + const IndexSet &ghost_indices, + const MPI_Comm communicator); + + /** + * Same as above, but without ghost entries. + */ + void + reinit(const IndexSet &local_range, const MPI_Comm communicator); + + /** + * Initialize the vector given to the parallel partitioning described in + * @p partitioner. The input argument is a shared pointer, which store + * the partitioner data only once and share it between several vectors + * with the same layout. + */ + void + reinit( + const std::shared_ptr &partitioner); + + /** + * Swap the contents of this vector and the other vector @p v. One could + * do this operation with a temporary variable and copying over the data + * elements, but this function is significantly more efficient since it + * only swaps the pointers to the data of the two vectors and therefore + * does not need to allocate temporary storage and move data around. + * + * This function is analogous to the @p swap function of all C++ + * standard containers. Also, there is a global function + * swap(u,v) that simply calls u.swap(v), again in + * analogy to standard functions. + * + * This function is virtual in order to allow for derived classes to + * handle memory separately. + */ + void + swap(Vector &v); + + /** + * Assigns the vector to the parallel partitioning of the input vector + * @p in_vector, and copies all the data. + * + * If one of the input vector or the calling vector (to the left of the + * assignment operator) had ghost elements set before this operation, + * the calling vector will have ghost values set. Otherwise, it will be + * in write mode. If the input vector does not have any ghost elements + * at all, the vector will also update its ghost values in analogy to + * the respective setting the Trilinos and PETSc vectors. + */ + Vector & + operator=(const Vector &in_vector); + + /** + * Assigns the vector to the parallel partitioning of the input vector + * @p in_vector, and copies all the data. 
+ * + * If one of the input vector or the calling vector (to the left of the + * assignment operator) had ghost elements set before this operation, + * the calling vector will have ghost values set. Otherwise, it will be + * in write mode. If the input vector does not have any ghost elements + * at all, the vector will also update its ghost values in analogy to + * the respective setting the Trilinos and PETSc vectors. + */ + template + Vector & + operator=(const Vector &in_vector); + +#ifdef DEAL_II_WITH_PETSC + /** + * Copy the content of a PETSc vector into the calling vector. This + * function assumes that the vectors layouts have already been + * initialized to match. + * + * This operator is only available if deal.II was configured with PETSc. + * + * This function is deprecated. Use the interface through + * ReadWriteVector instead. + */ + DEAL_II_DEPRECATED + Vector & + operator=(const PETScWrappers::MPI::Vector &petsc_vec); +#endif + +#ifdef DEAL_II_WITH_TRILINOS + /** + * Copy the content of a Trilinos vector into the calling vector. This + * function assumes that the vectors layouts have already been + * initialized to match. + * + * This operator is only available if deal.II was configured with + * Trilinos. + * + * This function is deprecated. Use the interface through + * ReadWriteVector instead. + */ + DEAL_II_DEPRECATED + Vector & + operator=(const TrilinosWrappers::MPI::Vector &trilinos_vec); +#endif + //@} + + /** + * @name 2: Parallel data exchange + */ + //@{ + /** + * This function copies the data that has accumulated in the data buffer + * for ghost indices to the owning processor. For the meaning of the + * argument @p operation, see the entry on + * @ref GlossCompress "Compressing SharedMPI vectors and matrices" + * in the glossary. + * + * There are four variants for this function. If called with argument @p + * VectorOperation::add adds all the data accumulated in ghost elements + * to the respective elements on the owning processor and clears the + * ghost array afterwards. If called with argument @p + * VectorOperation::insert, a set operation is performed. Since setting + * elements in a vector with ghost elements is ambiguous (as one can set + * both the element on the ghost site as well as the owning site), this + * operation makes the assumption that all data is set correctly on the + * owning processor. Upon call of compress(VectorOperation::insert), all + * ghost entries are thus simply zeroed out (using zero_ghost_values()). + * In debug mode, a check is performed for whether the data set is + * actually consistent between processors, i.e., whenever a non-zero + * ghost element is found, it is compared to the value on the owning + * processor and an exception is thrown if these elements do not agree. + * If called with VectorOperation::min or VectorOperation::max, the + * minimum or maximum on all elements across the processors is set. + * @note This vector class has a fixed set of ghost entries attached to + * the local representation. As a consequence, all ghost entries are + * assumed to be valid and will be exchanged unconditionally according + * to the given VectorOperation. Make sure to initialize all ghost + * entries with the neutral element of the given VectorOperation or + * touch all ghost entries. The neutral element is zero for + * VectorOperation::add and VectorOperation::insert, `+inf` for + * VectorOperation::min, and `-inf` for VectorOperation::max. 
If all + * values are initialized with values below zero and compress is called + * with VectorOperation::max two times subsequently, the maximal value + * after the second calculation will be zero. + */ + virtual void + compress(::dealii::VectorOperation::values operation) override; + + /** + * Fills the data field for ghost indices with the values stored in the + * respective positions of the owning processor. This function is needed + * before reading from ghosts. The function is @p const even though + * ghost data is changed. This is needed to allow functions with a @p + * const vector to perform the data exchange without creating + * temporaries. + * + * After calling this method, write access to ghost elements of the + * vector is forbidden and an exception is thrown. Only read access to + * ghost elements is allowed in this state. Note that all subsequent + * operations on this vector, like global vector addition, etc., will + * also update the ghost values by a call to this method after the + * operation. However, global reduction operations like norms or the + * inner product will always ignore ghost elements in order to avoid + * counting the ghost data more than once. To allow writing to ghost + * elements again, call zero_out_ghosts(). + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + void + update_ghost_values() const; + + /** + * Initiates communication for the @p compress() function with non- + * blocking communication. This function does not wait for the transfer + * to finish, in order to allow for other computations during the time + * it takes until all data arrives. + * + * Before the data is actually exchanged, the function must be followed + * by a call to @p compress_finish(). + * + * In case this function is called for more than one vector before @p + * compress_finish() is invoked, it is mandatory to specify a unique + * communication channel to each such call, in order to avoid several + * messages with the same ID that will corrupt this operation. Any + * communication channel less than 100 is a valid value (in particular, + * the range $[100, 200)$ is reserved for + * LinearAlgebra::SharedMPI::BlockVector). + */ + void + compress_start( + const unsigned int communication_channel = 0, + ::dealii::VectorOperation::values operation = VectorOperation::add); + + /** + * For all requests that have been initiated in compress_start, wait for + * the communication to finish. Once it is finished, add or set the data + * (depending on the flag operation) to the respective positions in the + * owning processor, and clear the contents in the ghost data fields. + * The meaning of this argument is the same as in compress(). + * + * This function should be called exactly once per vector after calling + * compress_start, otherwise the result is undefined. In particular, it + * is not well-defined to call compress_start on the same vector again + * before compress_finished has been called. However, there is no + * warning to prevent this situation. + * + * Must follow a call to the @p compress_start function. + */ + void + compress_finish(::dealii::VectorOperation::values operation); + + /** + * Initiates communication for the @p update_ghost_values() function + * with non-blocking communication. This function does not wait for the + * transfer to finish, in order to allow for other computations during + * the time it takes until all data arrives. 
+ * + * Before the data is actually exchanged, the function must be followed + * by a call to @p update_ghost_values_finish(). + * + * In case this function is called for more than one vector before @p + * update_ghost_values_finish() is invoked, it is mandatory to specify a + * unique communication channel to each such call, in order to avoid + * several messages with the same ID that will corrupt this operation. + * Any communication channel less than 100 is a valid value (in + * particular, the range $[100, 200)$ is reserved for + * LinearAlgebra::SharedMPI::BlockVector). + */ + void + update_ghost_values_start( + const unsigned int communication_channel = 0) const; + + + /** + * For all requests that have been started in update_ghost_values_start, + * wait for the communication to finish. + * + * Must follow a call to the @p update_ghost_values_start function + * before reading data from ghost indices. + */ + void + update_ghost_values_finish() const; + + /** + * This method zeros the entries on ghost dofs, but does not touch + * locally owned DoFs. + * + * After calling this method, read access to ghost elements of the + * vector is forbidden and an exception is thrown. Only write access to + * ghost elements is allowed in this state. + */ + void + zero_out_ghosts() const; + + /** + * Return whether the vector currently is in a state where ghost values + * can be read or not. This is the same functionality as other parallel + * vectors have. If this method returns false, this only means that + * read-access to ghost elements is prohibited whereas write access is + * still possible (to those entries specified as ghosts during + * initialization), not that there are no ghost elements at all. + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + bool + has_ghost_elements() const; + + /** + * This method copies the data in the locally owned range from another + * SharedMPI vector @p src into the calling vector. As opposed to + * operator= that also includes ghost entries, this operation ignores + * the ghost range. The only prerequisite is that the local range on the + * calling vector and the given vector @p src are the same on all + * processors. It is explicitly allowed that the two vectors have + * different ghost elements that might or might not be related to each + * other. + * + * Since no data exchange is performed, make sure that neither @p src + * nor the calling vector have pending communications in order to obtain + * correct results. + */ + template + void + copy_locally_owned_data_from(const Vector &src); + + /** + * Import all the elements present in the SharedMPI vector @p src. + * VectorOperation::values @p operation is used to decide if the elements + * in @p V should be added to the current vector or replace the current + * elements. The main purpose of this function is to get data from one + * memory space, e.g. CUDA, to the other, e.g. the Host. + * + * @note The partitioners of the two SharedMPI vectors need to be the + * same as no MPI communication is performed. + */ + template + void + import(const Vector &src, + VectorOperation::values operation); + + //@} + + /** + * @name 3: Implementation of VectorSpaceVector + */ + //@{ + + /** + * Change the dimension to that of the vector V. The elements of V are not + * copied. + */ + virtual void + reinit(const VectorSpaceVector &V, + const bool omit_zeroing_entries = false) override; + + /** + * Multiply the entire vector by a fixed factor. 
+ */ + virtual Vector & + operator*=(const Number factor) override; + + /** + * Divide the entire vector by a fixed factor. + */ + virtual Vector & + operator/=(const Number factor) override; + + /** + * Add the vector @p V to the present one. + */ + virtual Vector & + operator+=(const VectorSpaceVector &V) override; + + /** + * Subtract the vector @p V from the present one. + */ + virtual Vector & + operator-=(const VectorSpaceVector &V) override; + + /** + * Import all the elements present in the vector's IndexSet from the input + * vector @p V. VectorOperation::values @p operation is used to decide if + * the elements in @p V should be added to the current vector or replace the + * current elements. The last parameter can be used if the same + * communication pattern is used multiple times. This can be used to + * improve performance. + * + * @note If the MemorySpace is CUDA, the data in the ReadWriteVector will + * be moved to the device. + */ + virtual void + import( + const LinearAlgebra::ReadWriteVector & V, + VectorOperation::values operation, + std::shared_ptr communication_pattern = + std::shared_ptr()) override; + + /** + * Return the scalar product of two vectors. + */ + virtual Number + operator*(const VectorSpaceVector &V) const override; + + /** + * Add @p a to all components. Note that @p a is a scalar not a vector. + */ + virtual void + add(const Number a) override; + + /** + * Simple addition of a multiple of a vector, i.e. *this += a*V. + */ + virtual void + add(const Number a, const VectorSpaceVector &V) override; + + /** + * Multiple addition of scaled vectors, i.e. *this += a*V+b*W. + */ + virtual void + add(const Number a, + const VectorSpaceVector &V, + const Number b, + const VectorSpaceVector &W) override; + + /** + * A collective add operation: This function adds a whole set of values + * stored in @p values to the vector components specified by @p indices. + */ + virtual void + add(const std::vector &indices, + const std::vector & values); + + /** + * Scaling and simple addition of a multiple of a vector, i.e. *this = + * s*(*this)+a*V. + */ + virtual void + sadd(const Number s, + const Number a, + const VectorSpaceVector &V) override; + + /** + * Scale each element of this vector by the corresponding element in the + * argument. This function is mostly meant to simulate multiplication (and + * immediate re-assignment) by a diagonal scaling matrix. + */ + virtual void + scale(const VectorSpaceVector &scaling_factors) override; + + /** + * Assignment *this = a*V. + */ + virtual void + equ(const Number a, const VectorSpaceVector &V) override; + + /** + * Return the l1 norm of the vector (i.e., the sum of the + * absolute values of all entries among all processors). + */ + virtual real_type + l1_norm() const override; + + /** + * Return the $l_2$ norm of the vector (i.e., the square root of + * the sum of the square of all entries among all processors). + */ + virtual real_type + l2_norm() const override; + + /** + * Return the square of the $l_2$ norm of the vector. + */ + real_type + norm_sqr() const; + + /** + * Return the maximum norm of the vector (i.e., the maximum absolute value + * among all entries and among all processors). + */ + virtual real_type + linfty_norm() const override; + + /** + * Perform a combined operation of a vector addition and a subsequent + * inner product, returning the value of the inner product. 
In other + * words, the result of this function is the same as if the user called + * @code + * this->add(a, V); + * return_value = *this * W; + * @endcode + * + * The reason this function exists is that this operation involves less + * memory transfer than calling the two functions separately. This method + * only needs to load three vectors, @p this, @p V, @p W, whereas calling + * separate methods means to load the calling vector @p this twice. Since + * most vector operations are memory transfer limited, this reduces the + * time by 25\% (or 50\% if @p W equals @p this). + * + * For complex-valued vectors, the scalar product in the second step is + * implemented as + * $\left=\sum_i v_i \bar{w_i}$. + */ + virtual Number + add_and_dot(const Number a, + const VectorSpaceVector &V, + const VectorSpaceVector &W) override; + + /** + * Return the global size of the vector, equal to the sum of the number of + * locally owned indices among all processors. + */ + virtual size_type + size() const override; + + /** + * Return an index set that describes which elements of this vector are + * owned by the current processor. As a consequence, the index sets + * returned on different processors if this is a SharedMPI vector will + * form disjoint sets that add up to the complete index set. Obviously, if + * a vector is created on only one processor, then the result would + * satisfy + * @code + * vec.locally_owned_elements() == complete_index_set(vec.size()) + * @endcode + */ + virtual dealii::IndexSet + locally_owned_elements() const override; + + /** + * Print the vector to the output stream @p out. + */ + virtual void + print(std::ostream & out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const override; + + /** + * Return the memory consumption of this class in bytes. + */ + virtual std::size_t + memory_consumption() const override; + //@} + + /** + * @name 4: Other vector operations not included in VectorSpaceVector + */ + //@{ + + /** + * Sets all elements of the vector to the scalar @p s. If the scalar is + * zero, also ghost elements are set to zero, otherwise they remain + * unchanged. + */ + virtual Vector & + operator=(const Number s) override; + + /** + * This is a collective add operation that adds a whole set of values + * stored in @p values to the vector components specified by @p indices. + */ + template + void + add(const std::vector & indices, + const ::dealii::Vector &values); + + /** + * Take an address where n_elements are stored contiguously and add them + * into the vector. + */ + template + void + add(const size_type n_elements, + const size_type * indices, + const OtherNumber *values); + + /** + * Scaling and simple vector addition, i.e. *this = + * s*(*this)+V. + */ + void + sadd(const Number s, const Vector &V); + + /** + * Scaling and multiple addition. + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + void + sadd(const Number s, + const Number a, + const Vector &V, + const Number b, + const Vector &W); + + /** + * Assignment *this = a*u + b*v. + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + void + equ(const Number a, + const Vector &u, + const Number b, + const Vector &v); + + //@} + + + /** + * @name 5: Entry access and local data representation + */ + //@{ + + /** + * Return the local size of the vector, i.e., the number of indices + * owned locally. + */ + size_type + local_size() const; + + /** + * Return the half-open interval that specifies the locally owned range + * of the vector. 
Note that local_size() == local_range().second - + * local_range().first. + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + std::pair + local_range() const; + + /** + * Return true if the given global index is in the local range of this + * processor. + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + bool + in_local_range(const size_type global_index) const; + + /** + * Return the number of ghost elements present on the vector. + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + size_type + n_ghost_entries() const; + + /** + * Return an index set that describes which elements of this vector are + * not owned by the current processor but can be written into or read + * from locally (ghost elements). + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + const IndexSet & + ghost_elements() const; + + /** + * Return whether the given global index is a ghost index on the + * present processor. Returns false for indices that are owned locally + * and for indices not present at all. + * + * This function is deprecated. + */ + DEAL_II_DEPRECATED + bool + is_ghost_entry(const types::global_dof_index global_index) const; + + /** + * Make the @p Vector class a bit like the vector<> class of + * the C++ standard library by returning iterators to the start and end + * of the locally owned elements of this vector. + * + * It holds that end() - begin() == local_size(). + * + * @note For the CUDA memory space, the iterator points to memory on the + * device. + */ + iterator + begin(); + + /** + * Return constant iterator to the start of the locally owned elements + * of the vector. + * + * @note For the CUDA memory space, the iterator points to memory on the + * device. + */ + const_iterator + begin() const; + + /** + * Return an iterator pointing to the element past the end of the array + * of locally owned entries. + * + * @note For the CUDA memory space, the iterator points to memory on the + * device. + */ + iterator + end(); + + /** + * Return a constant iterator pointing to the element past the end of + * the array of the locally owned entries. + * + * @note For the CUDA memory space, the iterator points to memory on the + * device. + */ + const_iterator + end() const; + + /** + * Read access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. + * + * Performance: O(1) for locally owned elements that represent + * a contiguous range and O(log(nranges)) for ghost + * elements (quite fast, but slower than local_element()). + */ + Number + operator()(const size_type global_index) const; + + /** + * Read and write access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. + * + * Performance: O(1) for locally owned elements that represent + * a contiguous range and O(log(nranges)) for ghost + * elements (quite fast, but slower than local_element()). + */ + Number & + operator()(const size_type global_index); + + /** + * Read access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. + * + * This function does the same thing as operator(). 
+ */ + Number operator[](const size_type global_index) const; + /** + * Read and write access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. + * + * This function does the same thing as operator(). + */ + Number &operator[](const size_type global_index); + + /** + * Read access to the data field specified by @p local_index. Locally + * owned indices can be accessed with indices + * [0,local_size), and ghost indices with indices + * [local_size,local_size+ n_ghost_entries]. + * + * Performance: Direct array access (fast). + */ + Number + local_element(const size_type local_index) const; + + /** + * Read and write access to the data field specified by @p local_index. + * Locally owned indices can be accessed with indices + * [0,local_size), and ghost indices with indices + * [local_size,local_size+n_ghosts]. + * + * Performance: Direct array access (fast). + */ + Number & + local_element(const size_type local_index); + + /** + * Return the pointer to the underlying raw array. + * + * @note For the CUDA memory space, the pointer points to memory on the + * device. + */ + Number * + get_values() const; + + /** + * Instead of getting individual elements of a vector via operator(), + * this function allows getting a whole set of elements at once. The + * indices of the elements to be read are stated in the first argument, + * the corresponding values are returned in the second. + * + * If the current vector is called @p v, then this function is the equivalent + * to the code + * @code + * for (unsigned int i=0; i + void + extract_subvector_to(const std::vector &indices, + std::vector & values) const; + + /** + * Instead of getting individual elements of a vector via operator(), + * this function allows getting a whole set of elements at once. In + * contrast to the previous function, this function obtains the + * indices of the elements by dereferencing all elements of the iterator + * range provided by the first two arguments, and puts the vector + * values into memory locations obtained by dereferencing a range + * of iterators starting at the location pointed to by the third + * argument. + * + * If the current vector is called @p v, then this function is the equivalent + * to the code + * @code + * ForwardIterator indices_p = indices_begin; + * OutputIterator values_p = values_begin; + * while (indices_p != indices_end) + * { + * *values_p = v[*indices_p]; + * ++indices_p; + * ++values_p; + * } + * @endcode + * + * @pre It must be possible to write into as many memory locations + * starting at @p values_begin as there are iterators between + * @p indices_begin and @p indices_end. + */ + template + void + extract_subvector_to(ForwardIterator indices_begin, + const ForwardIterator indices_end, + OutputIterator values_begin) const; + /** + * Return whether the vector contains only elements with value zero. + * This is a collective operation. This function is expensive, because + * potentially all elements have to be checked. + */ + virtual bool + all_zero() const override; + + /** + * Compute the mean value of all the entries in the vector. + */ + virtual Number + mean_value() const override; + + /** + * $l_p$-norm of the vector. The pth root of the sum of the pth powers + * of the absolute values of the elements. 
+ */ + real_type + lp_norm(const real_type p) const; + //@} + + /** + * @name 6: Mixed stuff + */ + //@{ + + /** + * Return a reference to the MPI communicator object in use with this + * vector. + */ + const MPI_Comm & + get_mpi_communicator() const; + + /** + * Return the MPI partitioner that describes the parallel layout of the + * vector. This object can be used to initialize another vector with the + * respective reinit() call, for additional queries regarding the + * parallel communication, or the compatibility of partitioners. + */ + const std::shared_ptr & + get_partitioner() const; + + /** + * Check whether the given partitioner is compatible with the + * partitioner used for this vector. Two partitioners are compatible if + * they have the same local size and the same ghost indices. They do not + * necessarily need to be the same data field of the shared pointer. + * This is a local operation only, i.e., if only some processors decide + * that the partitioning is not compatible, only these processors will + * return @p false, whereas the other processors will return @p true. + */ + bool + partitioners_are_compatible( + const Utilities::MPI::Partitioner &part) const; + + /** + * Check whether the given partitioner is compatible with the + * partitioner used for this vector. Two partitioners are compatible if + * they have the same local size and the same ghost indices. They do not + * necessarily need to be the same data field. As opposed to + * partitioners_are_compatible(), this method checks for compatibility + * among all processors and the method only returns @p true if the + * partitioner is the same on all processors. + * + * This method performs global communication, so make sure to use it + * only in a context where all processors call it the same number of + * times. + */ + bool + partitioners_are_globally_compatible( + const Utilities::MPI::Partitioner &part) const; + + /** + * Change the ghost state of this vector to @p ghosted. + */ + void + set_ghost_state(const bool ghosted) const; + + //@} + + /** + * Attempt to perform an operation between two incompatible vector types. + * + * @ingroup Exceptions + */ + DeclException0(ExcVectorTypeNotCompatible); + + /** + * Attempt to perform an operation not implemented on the device. + * + * @ingroup Exceptions + */ + DeclException0(ExcNotAllowedForCuda); + + /** + * Exception + */ + DeclException3(ExcNonMatchingElements, + Number, + Number, + unsigned int, + << "Called compress(VectorOperation::insert), but" + << " the element received from a remote processor, value " + << std::setprecision(16) << arg1 + << ", does not match with the value " + << std::setprecision(16) << arg2 + << " on the owner processor " << arg3); + + /** + * Exception + */ + DeclException4(ExcAccessToNonLocalElement, + size_type, + size_type, + size_type, + size_type, + << "You tried to access element " << arg1 + << " of a SharedMPI vector, but this element is not " + << "stored on the current processor. Note: The range of " + << "locally owned elements is " << arg2 << " to " << arg3 + << ", and there are " << arg4 << " ghost elements " + << "that this vector can access."); + + private: + /** + * Simple addition of a multiple of a vector, i.e. *this += a*V + * without MPI communication. + */ + void + add_local(const Number a, const VectorSpaceVector &V); + + /** + * Scaling and simple addition of a multiple of a vector, i.e. *this = + * s*(*this)+a*V without MPI communication. 
+ */ + void + sadd_local(const Number s, + const Number a, + const VectorSpaceVector &V); + + /** + * Local part of the inner product of two vectors. + */ + template + Number + inner_product_local(const Vector &V) const; + + /** + * Local part of norm_sqr(). + */ + real_type + norm_sqr_local() const; + + /** + * Local part of mean_value(). + */ + Number + mean_value_local() const; + + /** + * Local part of l1_norm(). + */ + real_type + l1_norm_local() const; + + /** + * Local part of lp_norm(). + */ + real_type + lp_norm_local(const real_type p) const; + + /** + * Local part of linfty_norm(). + */ + real_type + linfty_norm_local() const; + + /** + * Local part of the addition followed by an inner product of two + * vectors. The same applies for complex-valued vectors as for + * the add_and_dot() function. + */ + Number + add_and_dot_local(const Number a, + const Vector &V, + const Vector &W); + + /** + * Shared pointer to store the parallel partitioning information. This + * information can be shared between several vectors that have the same + * partitioning. + */ + std::shared_ptr partitioner; + + /** + * The size that is currently allocated in the val array. + */ + size_type allocated_size; + + /** + * Underlying data structure storing the local elements of this vector. + */ + mutable ::dealii::MemorySpace::MemorySpaceData data; + + /** + * For parallel loops with TBB, this member variable stores the affinity + * information of loops. + */ + mutable std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> + thread_loop_partitioner; + + /** + * Temporary storage that holds the data that is sent to this processor + * in @p compress() or sent from this processor in + * @p update_ghost_values. + */ + mutable ::dealii::MemorySpace::MemorySpaceData + import_data; + + /** + * Stores whether the vector currently allows for reading ghost elements + * or not. Note that this is to ensure consistent ghost data and does + * not indicate whether the vector actually can store ghost elements. In + * particular, when assembling a vector we do not allow reading + * elements, only writing them. + */ + mutable bool vector_is_ghosted; + +#ifdef DEAL_II_WITH_MPI + /** + * A vector that collects all requests from @p compress() operations. + * This class uses persistent MPI communicators, i.e., the communication + * channels are stored during successive calls to a given function. This + * reduces the overhead involved with setting up the MPI machinery, but + * it does not remove the need for a receive operation to be posted + * before the data can actually be sent. + */ + std::vector compress_requests; + + /** + * A vector that collects all requests from @p update_ghost_values() + * operations. This class uses persistent MPI communicators. + */ + mutable std::vector update_ghost_values_requests; +#endif + + /** + * A lock that makes sure that the @p compress and @p + * update_ghost_values functions give reasonable results also when used + * with several threads. + */ + mutable std::mutex mutex; + + /** + * A helper function that clears the compress_requests and + * update_ghost_values_requests field. Used in reinit functions. + */ + void + clear_mpi_requests(); + + /** + * A helper function that is used to resize the val array. + */ + void + resize_val(const size_type new_allocated_size); + + // Make all other vector types friends. + template + friend class Vector; + + // Make BlockVector type friends. 
+ template + friend class BlockVector; + }; + /*@}*/ + + + /*-------------------- Inline functions ---------------------------------*/ + +#ifndef DOXYGEN + + namespace internal + { + template + struct Policy + { + static inline typename Vector::iterator + begin(::dealii::MemorySpace::MemorySpaceData &) + { + return nullptr; + } + + static inline typename Vector::const_iterator + begin( + const ::dealii::MemorySpace::MemorySpaceData &) + { + return nullptr; + } + + static inline Number * + get_values( + ::dealii::MemorySpace::MemorySpaceData &) + { + return nullptr; + } + }; + + + + template + struct Policy + { + static inline + typename Vector::iterator + begin(::dealii::MemorySpace:: + MemorySpaceData &data) + { + return data.values.get(); + } + + static inline + typename Vector::const_iterator + begin(const ::dealii::MemorySpace:: + MemorySpaceData &data) + { + return data.values.get(); + } + + static inline Number * + get_values(::dealii::MemorySpace:: + MemorySpaceData &data) + { + return data.values.get(); + } + }; + + + + template + struct Policy + { + static inline + typename Vector::iterator + begin(::dealii::MemorySpace:: + MemorySpaceData &data) + { + return data.values_dev.get(); + } + + static inline + typename Vector::const_iterator + begin(const ::dealii::MemorySpace:: + MemorySpaceData &data) + { + return data.values_dev.get(); + } + + static inline Number * + get_values(::dealii::MemorySpace:: + MemorySpaceData &data) + { + return data.values_dev.get(); + } + }; + } // namespace internal + + + template + inline bool + Vector::has_ghost_elements() const + { + return vector_is_ghosted; + } + + + + template + inline typename Vector::size_type + Vector::size() const + { + return partitioner->size(); + } + + + + template + inline typename Vector::size_type + Vector::local_size() const + { + return partitioner->local_size(); + } + + + + template + inline std::pair::size_type, + typename Vector::size_type> + Vector::local_range() const + { + return partitioner->local_range(); + } + + + + template + inline bool + Vector::in_local_range( + const size_type global_index) const + { + return partitioner->in_local_range(global_index); + } + + + + template + inline IndexSet + Vector::locally_owned_elements() const + { + IndexSet is(size()); + + is.add_range(partitioner->local_range().first, + partitioner->local_range().second); + + return is; + } + + + + template + inline typename Vector::size_type + Vector::n_ghost_entries() const + { + return partitioner->n_ghost_indices(); + } + + + + template + inline const IndexSet & + Vector::ghost_elements() const + { + return partitioner->ghost_indices(); + } + + + + template + inline bool + Vector::is_ghost_entry( + const size_type global_index) const + { + return partitioner->is_ghost_entry(global_index); + } + + + + template + inline typename Vector::iterator + Vector::begin() + { + return internal::Policy::begin(data); + } + + + + template + inline typename Vector::const_iterator + Vector::begin() const + { + return internal::Policy::begin(data); + } + + + + template + inline typename Vector::iterator + Vector::end() + { + return internal::Policy::begin(data) + + partitioner->local_size(); + } + + + + template + inline typename Vector::const_iterator + Vector::end() const + { + return internal::Policy::begin(data) + + partitioner->local_size(); + } + + + + template + inline Number + Vector::operator()(const size_type global_index) const + { + Assert((std::is_same::value), + ExcMessage( + "This function is only implemented for the Host memory 
space")); + Assert( + partitioner->in_local_range(global_index) || + partitioner->ghost_indices().is_element(global_index), + ExcAccessToNonLocalElement(global_index, + partitioner->local_range().first, + partitioner->local_range().second, + partitioner->ghost_indices().n_elements())); + // do not allow reading a vector which is not in ghost mode + Assert(partitioner->in_local_range(global_index) || + vector_is_ghosted == true, + ExcMessage("You tried to read a ghost element of this vector, " + "but it has not imported its ghost values.")); + return data.values[partitioner->global_to_local(global_index)]; + } + + + + template + inline Number & + Vector::operator()(const size_type global_index) + { + Assert((std::is_same::value), + ExcMessage( + "This function is only implemented for the Host memory space")); + Assert( + partitioner->in_local_range(global_index) || + partitioner->ghost_indices().is_element(global_index), + ExcAccessToNonLocalElement(global_index, + partitioner->local_range().first, + partitioner->local_range().second, + partitioner->ghost_indices().n_elements())); + // we would like to prevent reading ghosts from a vector that does not + // have them imported, but this is not possible because we might be in a + // part of the code where the vector has enabled ghosts but is non-const + // (then, the compiler picks this method according to the C++ rule book + // even if a human would pick the const method when this subsequent use + // is just a read) + return data.values[partitioner->global_to_local(global_index)]; + } + + + + template + inline Number Vector:: + operator[](const size_type global_index) const + { + return operator()(global_index); + } + + + + template + inline Number &Vector:: + operator[](const size_type global_index) + { + return operator()(global_index); + } + + + + template + inline Number + Vector::local_element( + const size_type local_index) const + { + Assert((std::is_same::value), + ExcMessage( + "This function is only implemented for the Host memory space")); + AssertIndexRange(local_index, + partitioner->local_size() + + partitioner->n_ghost_indices()); + // do not allow reading a vector which is not in ghost mode + Assert(local_index < local_size() || vector_is_ghosted == true, + ExcMessage("You tried to read a ghost element of this vector, " + "but it has not imported its ghost values.")); + + return data.values[local_index]; + } + + + + template + inline Number & + Vector::local_element(const size_type local_index) + { + Assert((std::is_same::value), + ExcMessage( + "This function is only implemented for the Host memory space")); + + AssertIndexRange(local_index, + partitioner->local_size() + + partitioner->n_ghost_indices()); + + return data.values[local_index]; + } + + + + template + inline Number * + Vector::get_values() const + { + return internal::Policy::get_values(data); + } + + + + template + template + inline void + Vector::extract_subvector_to( + const std::vector &indices, + std::vector & values) const + { + for (size_type i = 0; i < indices.size(); ++i) + values[i] = operator()(indices[i]); + } + + + + template + template + inline void + Vector::extract_subvector_to( + ForwardIterator indices_begin, + const ForwardIterator indices_end, + OutputIterator values_begin) const + { + while (indices_begin != indices_end) + { + *values_begin = operator()(*indices_begin); + indices_begin++; + values_begin++; + } + } + + + + template + template + inline void + Vector::add( + const std::vector & indices, + const ::dealii::Vector &values) + { + 
AssertDimension(indices.size(), values.size()); + for (size_type i = 0; i < indices.size(); ++i) + { + Assert( + numbers::is_finite(values[i]), + ExcMessage( + "The given value is not finite but either infinite or Not A Number (NaN)")); + this->operator()(indices[i]) += values(i); + } + } + + + + template + template + inline void + Vector::add(const size_type n_elements, + const size_type * indices, + const OtherNumber *values) + { + for (size_type i = 0; i < n_elements; ++i, ++indices, ++values) + { + Assert( + numbers::is_finite(*values), + ExcMessage( + "The given value is not finite but either infinite or Not A Number (NaN)")); + this->operator()(*indices) += *values; + } + } + + + + template + inline const MPI_Comm & + Vector::get_mpi_communicator() const + { + return partitioner->get_mpi_communicator(); + } + + + + template + inline const std::shared_ptr & + Vector::get_partitioner() const + { + return partitioner; + } + + + + template + inline void + Vector::set_ghost_state(const bool ghosted) const + { + vector_is_ghosted = ghosted; + } + +#endif + + } // namespace SharedMPI +} // namespace LinearAlgebra + + +/** + * Global function @p swap which overloads the default implementation of the + * C++ standard library which uses a temporary object. The function simply + * exchanges the data of the two vectors. + * + * @relatesalso Vector + * @author Katharina Kormann, Martin Kronbichler, 2011 + */ +template +inline void +swap(LinearAlgebra::SharedMPI::Vector &u, + LinearAlgebra::SharedMPI::Vector &v) +{ + u.swap(v); +} + + +/** + * Declare dealii::LinearAlgebra::Vector< Number > as SharedMPI vector. + * + * @author Uwe Koecher, 2017 + */ +template +struct is_serial_vector> + : std::false_type +{}; + + + +namespace internal +{ + namespace LinearOperatorImplementation + { + template + class ReinitHelper; + + /** + * A helper class used internally in linear_operator.h. Specialization for + * LinearAlgebra::SharedMPI::Vector. + */ + template + class ReinitHelper> + { + public: + template + static void + reinit_range_vector(const Matrix & matrix, + LinearAlgebra::SharedMPI::Vector &v, + bool omit_zeroing_entries) + { + matrix.initialize_dof_vector(v); + if (!omit_zeroing_entries) + v = Number(); + } + + template + static void + reinit_domain_vector(const Matrix & matrix, + LinearAlgebra::SharedMPI::Vector &v, + bool omit_zeroing_entries) + { + matrix.initialize_dof_vector(v); + if (!omit_zeroing_entries) + v = Number(); + } + }; + + } // namespace LinearOperatorImplementation +} /* namespace internal */ + + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/include/deal.II/lac/la_sm_vector.templates.h b/include/deal.II/lac/la_sm_vector.templates.h new file mode 100644 index 0000000000..f83704973e --- /dev/null +++ b/include/deal.II/lac/la_sm_vector.templates.h @@ -0,0 +1,2151 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2011 - 2019 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. 
+// +// --------------------------------------------------------------------- + +#ifndef dealii_la_parallel_vector_templates_h +#define dealii_la_parallel_vector_templates_h + + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + + +DEAL_II_NAMESPACE_OPEN + + +namespace LinearAlgebra +{ + namespace SharedMPI + { + namespace internal + { + // In the import_from_ghosted_array_finish we might need to calculate the + // maximal and minimal value for the given number type, which is not + // straightforward for complex numbers. Therefore, comparison of complex + // numbers is prohibited and throws an exception. + template + Number + get_min(const Number a, const Number b) + { + return std::min(a, b); + } + + template + std::complex + get_min(const std::complex a, const std::complex) + { + AssertThrow(false, + ExcMessage("VectorOperation::min not " + "implemented for complex numbers")); + return a; + } + + template + Number + get_max(const Number a, const Number b) + { + return std::max(a, b); + } + + template + std::complex + get_max(const std::complex a, const std::complex) + { + AssertThrow(false, + ExcMessage("VectorOperation::max not " + "implemented for complex numbers")); + return a; + } + + + + // Resize the underlying array on the host or on the device + template + struct la_parallel_vector_templates_functions + { + static_assert(std::is_same::value || + std::is_same::value, + "MemorySpace should be Host or CUDA"); + + static void + resize_val( + const types::global_dof_index /*new_alloc_size*/, + types::global_dof_index & /*allocated_size*/, + ::dealii::MemorySpace::MemorySpaceData + & /*data*/) + {} + + static void + import( + const ::dealii::LinearAlgebra::ReadWriteVector & /*V*/, + ::dealii::VectorOperation::values /*operation*/, + const std::shared_ptr & + /*communication_pattern*/, + const IndexSet & /*locally_owned_elem*/, + ::dealii::MemorySpace::MemorySpaceData + & /*data*/) + {} + + template + static void + linfty_norm_local( + const ::dealii::MemorySpace::MemorySpaceData + & /*data*/, + const unsigned int /*size*/, + RealType & /*max*/) + {} + }; + + template + struct la_parallel_vector_templates_functions + { + using size_type = types::global_dof_index; + + static void + resize_val(const types::global_dof_index new_alloc_size, + types::global_dof_index & allocated_size, + ::dealii::MemorySpace:: + MemorySpaceData &data) + { + if (new_alloc_size > allocated_size) + { + Assert(((allocated_size > 0 && data.values != nullptr) || + data.values == nullptr), + ExcInternalError()); + + Number *new_val; + Utilities::System::posix_memalign( + reinterpret_cast(&new_val), + 64, + sizeof(Number) * new_alloc_size); + data.values.reset(new_val); + + allocated_size = new_alloc_size; + } + else if (new_alloc_size == 0) + { + data.values.reset(); + allocated_size = 0; + } + } + + static void + import( + const ::dealii::LinearAlgebra::ReadWriteVector &V, + ::dealii::VectorOperation::values operation, + const std::shared_ptr + & communication_pattern, + const IndexSet &locally_owned_elem, + ::dealii::MemorySpace::MemorySpaceData + &data) + { + Assert( + (operation == ::dealii::VectorOperation::add) || + (operation == ::dealii::VectorOperation::insert), + ExcMessage( + "Only VectorOperation::add and VectorOperation::insert are allowed")); + + ::dealii::LinearAlgebra::SharedMPI:: + Vector + tmp_vector(communication_pattern); + + // fill entries from ReadWriteVector into the SharedMPI vector, + // including ghost entries. 
this is not really efficient right now + // because indices are translated twice, once by nth_index_in_set(i) + // and once for operator() of tmp_vector + const IndexSet &v_stored = V.get_stored_elements(); + for (size_type i = 0; i < v_stored.n_elements(); ++i) + tmp_vector(v_stored.nth_index_in_set(i)) = V.local_element(i); + + tmp_vector.compress(operation); + + // Copy the local elements of tmp_vector to the right place in val + IndexSet tmp_index_set = tmp_vector.locally_owned_elements(); + if (operation == VectorOperation::add) + { + for (size_type i = 0; i < tmp_index_set.n_elements(); ++i) + { + data.values[locally_owned_elem.index_within_set( + tmp_index_set.nth_index_in_set(i))] += + tmp_vector.local_element(i); + } + } + else + { + for (size_type i = 0; i < tmp_index_set.n_elements(); ++i) + { + data.values[locally_owned_elem.index_within_set( + tmp_index_set.nth_index_in_set(i))] = + tmp_vector.local_element(i); + } + } + } + + template + static void + linfty_norm_local(const ::dealii::MemorySpace::MemorySpaceData< + Number, + ::dealii::MemorySpace::Host> &data, + const unsigned int size, + RealType & max) + { + for (size_type i = 0; i < size; ++i) + max = + std::max(numbers::NumberTraits::abs(data.values[i]), max); + } + }; + +#ifdef DEAL_II_COMPILER_CUDA_AWARE + template + struct la_parallel_vector_templates_functions + { + using size_type = types::global_dof_index; + + static void + resize_val(const types::global_dof_index new_alloc_size, + types::global_dof_index & allocated_size, + ::dealii::MemorySpace:: + MemorySpaceData &data) + { + static_assert( + std::is_same::value || + std::is_same::value, + "Number should be float or double for CUDA memory space"); + + if (new_alloc_size > allocated_size) + { + Assert(((allocated_size > 0 && data.values_dev != nullptr) || + data.values_dev == nullptr), + ExcInternalError()); + + Number *new_val_dev; + Utilities::CUDA::malloc(new_val_dev, new_alloc_size); + data.values_dev.reset(new_val_dev); + + allocated_size = new_alloc_size; + } + else if (new_alloc_size == 0) + { + data.values_dev.reset(); + allocated_size = 0; + } + } + + static void + import(const ReadWriteVector &V, + VectorOperation::values operation, + std::shared_ptr + communication_pattern, + const IndexSet &locally_owned_elem, + ::dealii::MemorySpace:: + MemorySpaceData &data) + { + Assert( + (operation == ::dealii::VectorOperation::add) || + (operation == ::dealii::VectorOperation::insert), + ExcMessage( + "Only VectorOperation::add and VectorOperation::insert are allowed")); + + ::dealii::LinearAlgebra::SharedMPI:: + Vector + tmp_vector(communication_pattern); + + // fill entries from ReadWriteVector into the SharedMPI vector, + // including ghost entries. 
this is not really efficient right now + // because indices are translated twice, once by nth_index_in_set(i) + // and once for operator() of tmp_vector + const IndexSet & v_stored = V.get_stored_elements(); + const size_type n_elements = v_stored.n_elements(); + std::vector indices(n_elements); + for (size_type i = 0; i < n_elements; ++i) + indices[i] = communication_pattern->global_to_local( + v_stored.nth_index_in_set(i)); + // Move the indices to the device + size_type *indices_dev; + ::dealii::Utilities::CUDA::malloc(indices_dev, n_elements); + ::dealii::Utilities::CUDA::copy_to_dev(indices, indices_dev); + // Move the data to the device + Number *V_dev; + ::dealii::Utilities::CUDA::malloc(V_dev, n_elements); + cudaError_t cuda_error_code = cudaMemcpy(V_dev, + V.begin(), + n_elements * sizeof(Number), + cudaMemcpyHostToDevice); + AssertCuda(cuda_error_code); + + // Set the values in tmp_vector + const int n_blocks = + 1 + n_elements / (::dealii::CUDAWrappers::chunk_size * + ::dealii::CUDAWrappers::block_size); + ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated + <<>>( + indices_dev, tmp_vector.begin(), V_dev, n_elements); + + tmp_vector.compress(operation); + + // Copy the local elements of tmp_vector to the right place in val + IndexSet tmp_index_set = tmp_vector.locally_owned_elements(); + const size_type tmp_n_elements = tmp_index_set.n_elements(); + indices.resize(tmp_n_elements); + for (size_type i = 0; i < tmp_n_elements; ++i) + indices[i] = locally_owned_elem.index_within_set( + tmp_index_set.nth_index_in_set(i)); + ::dealii::Utilities::CUDA::free(indices_dev); + ::dealii::Utilities::CUDA::malloc(indices_dev, tmp_n_elements); + ::dealii::Utilities::CUDA::copy_to_dev(indices, indices_dev); + + if (operation == VectorOperation::add) + ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_permutated< + Number><<>>( + indices_dev, + data.values_dev.get(), + tmp_vector.begin(), + tmp_n_elements); + else + ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated< + Number><<>>( + indices_dev, + data.values_dev.get(), + tmp_vector.begin(), + tmp_n_elements); + + ::dealii::Utilities::CUDA::free(indices_dev); + ::dealii::Utilities::CUDA::free(V_dev); + } + + template + static void + linfty_norm_local(const ::dealii::MemorySpace::MemorySpaceData< + Number, + ::dealii::MemorySpace::CUDA> &data, + const unsigned int size, + RealType & result) + { + static_assert(std::is_same::value, + "RealType should be the same type as Number"); + + Number * result_device; + cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number)); + AssertCuda(error_code); + error_code = cudaMemset(result_device, 0, sizeof(Number)); + + const int n_blocks = 1 + size / (::dealii::CUDAWrappers::chunk_size * + ::dealii::CUDAWrappers::block_size); + ::dealii::LinearAlgebra::CUDAWrappers::kernel::reduction< + Number, + ::dealii::LinearAlgebra::CUDAWrappers::kernel::LInfty> + <<>>( + result_device, data.values_dev.get(), size); + + // Copy the result back to the host + error_code = cudaMemcpy(&result, + result_device, + sizeof(Number), + cudaMemcpyDeviceToHost); + AssertCuda(error_code); + // Free the memory on the device + error_code = cudaFree(result_device); + AssertCuda(error_code); + } + }; +#endif + } // namespace internal + + + template + void + Vector::clear_mpi_requests() + { +#ifdef DEAL_II_WITH_MPI + for (size_type j = 0; j < compress_requests.size(); j++) + { + const int ierr = MPI_Request_free(&compress_requests[j]); + AssertThrowMPI(ierr); + } + compress_requests.clear(); + for 
(size_type j = 0; j < update_ghost_values_requests.size(); j++) + { + const int ierr = MPI_Request_free(&update_ghost_values_requests[j]); + AssertThrowMPI(ierr); + } + update_ghost_values_requests.clear(); +#endif + } + + + + template + void + Vector::resize_val(const size_type new_alloc_size) + { + internal::la_parallel_vector_templates_functions< + Number, + MemorySpaceType>::resize_val(new_alloc_size, allocated_size, data); + + thread_loop_partitioner = + std::make_shared<::dealii::parallel::internal::TBBPartitioner>(); + } + + + + template + void + Vector::reinit(const size_type size, + const bool omit_zeroing_entries) + { + clear_mpi_requests(); + + // check whether we need to reallocate + resize_val(size); + + // delete previous content in import data + import_data.values.reset(); + import_data.values_dev.reset(); + + // set partitioner to serial version + partitioner = std::make_shared(size); + + // set entries to zero if so requested + if (omit_zeroing_entries == false) + this->operator=(Number()); + else + zero_out_ghosts(); + } + + + + template + template + void + Vector::reinit( + const Vector &v, + const bool omit_zeroing_entries) + { + clear_mpi_requests(); + Assert(v.partitioner.get() != nullptr, ExcNotInitialized()); + + // check whether the partitioners are + // different (check only if the are allocated + // differently, not if the actual data is + // different) + if (partitioner.get() != v.partitioner.get()) + { + partitioner = v.partitioner; + const size_type new_allocated_size = + partitioner->local_size() + partitioner->n_ghost_indices(); + resize_val(new_allocated_size); + } + + if (omit_zeroing_entries == false) + this->operator=(Number()); + else + zero_out_ghosts(); + + // do not reallocate import_data directly, but only upon request. It + // is only used as temporary storage for compress() and + // update_ghost_values, and we might have vectors where we never + // call these methods and hence do not need to have the storage. + import_data.values.reset(); + import_data.values_dev.reset(); + + thread_loop_partitioner = v.thread_loop_partitioner; + } + + + + template + void + Vector::reinit( + const IndexSet &locally_owned_indices, + const IndexSet &ghost_indices, + const MPI_Comm communicator) + { + // set up parallel partitioner with index sets and communicator + std::shared_ptr new_partitioner( + new Utilities::MPI::Partitioner(locally_owned_indices, + ghost_indices, + communicator)); + reinit(new_partitioner); + } + + + + template + void + Vector::reinit( + const IndexSet &locally_owned_indices, + const MPI_Comm communicator) + { + // set up parallel partitioner with index sets and communicator + std::shared_ptr new_partitioner( + new Utilities::MPI::Partitioner(locally_owned_indices, communicator)); + reinit(new_partitioner); + } + + + + template + void + Vector::reinit( + const std::shared_ptr &partitioner_in) + { + clear_mpi_requests(); + partitioner = partitioner_in; + + // set vector size and allocate memory + const size_type new_allocated_size = + partitioner->local_size() + partitioner->n_ghost_indices(); + resize_val(new_allocated_size); + + // initialize to zero + this->operator=(Number()); + + + // do not reallocate import_data directly, but only upon request. It + // is only used as temporary storage for compress() and + // update_ghost_values, and we might have vectors where we never + // call these methods and hence do not need to have the storage. 
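+      // Usage sketch (for illustration only; the index sets and the
+      // communicator are assumed to be provided by the application): a
+      // partitioner built once can be shared by any number of vectors,
+      // @code
+      //   const auto part = std::make_shared<Utilities::MPI::Partitioner>(
+      //     locally_owned_dofs, ghost_dofs, mpi_communicator);
+      //   LinearAlgebra::SharedMPI::Vector<double> v1, v2;
+      //   v1.reinit(part);
+      //   v2.reinit(part);
+      // @endcode
+      // so that several vectors re-use the same ghost exchange set-up.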
+ import_data.values.reset(); + import_data.values_dev.reset(); + + vector_is_ghosted = false; + } + + + + template + Vector::Vector() + : partitioner(new Utilities::MPI::Partitioner()) + , allocated_size(0) + { + reinit(0); + } + + + + template + Vector::Vector( + const Vector &v) + : Subscriptor() + , allocated_size(0) + , vector_is_ghosted(false) + { + reinit(v, true); + + thread_loop_partitioner = v.thread_loop_partitioner; + + const size_type this_size = local_size(); + if (this_size > 0) + { + dealii::internal::VectorOperations:: + functions::copy( + thread_loop_partitioner, partitioner->local_size(), v.data, data); + } + } + + + + template + Vector::Vector(const IndexSet &local_range, + const IndexSet &ghost_indices, + const MPI_Comm communicator) + : allocated_size(0) + , vector_is_ghosted(false) + { + reinit(local_range, ghost_indices, communicator); + } + + + + template + Vector::Vector(const IndexSet &local_range, + const MPI_Comm communicator) + : allocated_size(0) + , vector_is_ghosted(false) + { + reinit(local_range, communicator); + } + + + + template + Vector::Vector(const size_type size) + : allocated_size(0) + , vector_is_ghosted(false) + { + reinit(size, false); + } + + + + template + Vector::Vector( + const std::shared_ptr &partitioner) + : allocated_size(0) + , vector_is_ghosted(false) + { + reinit(partitioner); + } + + + + template + inline Vector::~Vector() + { + try + { + clear_mpi_requests(); + } + catch (...) + {} + } + + + + template + inline Vector & + Vector:: + operator=(const Vector &c) + { +#ifdef _MSC_VER + return this->operator=(c); +#else + return this->template operator=(c); +#endif + } + + + + template + template + inline Vector & + Vector:: + operator=(const Vector &c) + { + Assert(c.partitioner.get() != nullptr, ExcNotInitialized()); + + // we update ghost values whenever one of the input or output vector + // already held ghost values or when we import data from a vector with + // the same local range but different ghost layout + bool must_update_ghost_values = c.vector_is_ghosted; + + // check whether the two vectors use the same parallel partitioner. if + // not, check if all local ranges are the same (that way, we can + // exchange data between different parallel layouts). One variant which + // is included here and necessary for compatibility with the other + // SharedMPI vector classes (Trilinos, PETSc) is the case when vector + // c does not have any ghosts (constructed without ghost elements given) + // but the current vector does: In that case, we need to exchange data + // also when none of the two vector had updated its ghost values before. + if (partitioner.get() == nullptr) + reinit(c, true); + else if (partitioner.get() != c.partitioner.get()) + { + // local ranges are also the same if both partitioners are empty + // (even if they happen to define the empty range as [0,0) or [c,c) + // for some c!=0 in a different way). 
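+          // A concrete example of the empty-range case (illustration only):
+          // on a process that owns no rows, one partitioner may report its
+          // local range as [0,0) and the other one as [17,17); both ranges
+          // are empty and hence compatible, which the second half of the
+          // expression below accounts for.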
+ int local_ranges_are_identical = + (partitioner->local_range() == c.partitioner->local_range() || + (partitioner->local_range().second == + partitioner->local_range().first && + c.partitioner->local_range().second == + c.partitioner->local_range().first)); + if ((c.partitioner->n_mpi_processes() > 1 && + Utilities::MPI::min(local_ranges_are_identical, + c.partitioner->get_mpi_communicator()) == + 0) || + !local_ranges_are_identical) + reinit(c, true); + else + must_update_ghost_values |= vector_is_ghosted; + + must_update_ghost_values |= + (c.partitioner->ghost_indices_initialized() == false && + partitioner->ghost_indices_initialized() == true); + } + else + must_update_ghost_values |= vector_is_ghosted; + + thread_loop_partitioner = c.thread_loop_partitioner; + + const size_type this_size = partitioner->local_size(); + if (this_size > 0) + { + dealii::internal::VectorOperations:: + functions::copy( + thread_loop_partitioner, this_size, c.data, data); + } + + if (must_update_ghost_values) + update_ghost_values(); + else + zero_out_ghosts(); + return *this; + } + + + + template + template + void + Vector::copy_locally_owned_data_from( + const Vector &src) + { + AssertDimension(partitioner->local_size(), src.partitioner->local_size()); + if (partitioner->local_size() > 0) + { + dealii::internal::VectorOperations:: + functions::copy( + thread_loop_partitioner, + partitioner->local_size(), + src.data, + data); + } + } + + + + template + template + void + Vector::import( + const Vector &src, + VectorOperation::values operation) + { + Assert(src.partitioner.get() != nullptr, ExcNotInitialized()); + Assert(partitioner->locally_owned_range() == + src.partitioner->locally_owned_range(), + ExcMessage("Locally owned indices should be identical.")); + Assert(partitioner->ghost_indices() == src.partitioner->ghost_indices(), + ExcMessage("Ghost indices should be identical.")); + ::dealii::internal::VectorOperations:: + functions::import( + thread_loop_partitioner, allocated_size, operation, src.data, data); + } + + + +#ifdef DEAL_II_WITH_PETSC + + namespace petsc_helpers + { + template + void + copy_petsc_vector(const PETSC_Number *petsc_start_ptr, + const PETSC_Number *petsc_end_ptr, + Number * ptr) + { + std::copy(petsc_start_ptr, petsc_end_ptr, ptr); + } + + template + void + copy_petsc_vector(const std::complex *petsc_start_ptr, + const std::complex *petsc_end_ptr, + std::complex * ptr) + { + std::copy(petsc_start_ptr, petsc_end_ptr, ptr); + } + + template + void + copy_petsc_vector(const std::complex * /*petsc_start_ptr*/, + const std::complex * /*petsc_end_ptr*/, + Number * /*ptr*/) + { + AssertThrow(false, ExcMessage("Tried to copy complex -> real")); + } + } // namespace petsc_helpers + + template + Vector & + Vector:: + operator=(const PETScWrappers::MPI::Vector &petsc_vec) + { + // TODO: We would like to use the same compact infrastructure as for the + // Trilinos vector below, but the interface through ReadWriteVector does + // not support overlapping (ghosted) PETSc vectors, which we need for + // backward compatibility. 
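+      // Usage sketch (for illustration only; the index sets @p owned and
+      // @p ghosted as well as @p comm are assumed to exist): the classical
+      // conversion therefore still works, e.g.
+      // @code
+      //   PETScWrappers::MPI::Vector               petsc_vec(owned, ghosted, comm);
+      //   LinearAlgebra::SharedMPI::Vector<double> vec(owned, ghosted, comm);
+      //   vec = petsc_vec; // copies the owned range, then refreshes ghosts
+      // @endcode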
+ + Assert(petsc_vec.locally_owned_elements() == locally_owned_elements(), + StandardExceptions::ExcInvalidState()); + + // get a representation of the vector and copy it + PetscScalar * start_ptr; + PetscErrorCode ierr = + VecGetArray(static_cast(petsc_vec), &start_ptr); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + + const size_type vec_size = local_size(); + petsc_helpers::copy_petsc_vector(start_ptr, + start_ptr + vec_size, + begin()); + + // restore the representation of the vector + ierr = VecRestoreArray(static_cast(petsc_vec), &start_ptr); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + + // spread ghost values between processes? + if (vector_is_ghosted || petsc_vec.has_ghost_elements()) + update_ghost_values(); + + // return a reference to this object per normal c++ operator overloading + // semantics + return *this; + } + +#endif + + + +#ifdef DEAL_II_WITH_TRILINOS + + template + Vector & + Vector:: + operator=(const TrilinosWrappers::MPI::Vector &trilinos_vec) + { +# ifdef DEAL_II_WITH_MPI + IndexSet combined_set = partitioner->locally_owned_range(); + combined_set.add_indices(partitioner->ghost_indices()); + ReadWriteVector rw_vector(combined_set); + rw_vector.import(trilinos_vec, VectorOperation::insert); + import(rw_vector, VectorOperation::insert); + + if (vector_is_ghosted || trilinos_vec.has_ghost_elements()) + update_ghost_values(); +# else + AssertThrow(false, ExcNotImplemented()); +# endif + + return *this; + } + +#endif + + + + template + void + Vector::compress( + ::dealii::VectorOperation::values operation) + { + compress_start(0, operation); + compress_finish(operation); + } + + + + template + void + Vector::update_ghost_values() const + { + update_ghost_values_start(); + update_ghost_values_finish(); + } + + + + template + void + Vector::zero_out_ghosts() const + { + if (data.values != nullptr) + std::fill_n(data.values.get() + partitioner->local_size(), + partitioner->n_ghost_indices(), + Number()); +#ifdef DEAL_II_COMPILER_CUDA_AWARE + if (data.values_dev != nullptr) + { + const cudaError_t cuda_error_code = + cudaMemset(data.values_dev.get() + partitioner->local_size(), + 0, + partitioner->n_ghost_indices() * sizeof(Number)); + AssertCuda(cuda_error_code); + } +#endif + + vector_is_ghosted = false; + } + + + + template + void + Vector::compress_start( + const unsigned int communication_channel, + ::dealii::VectorOperation::values operation) + { + AssertIndexRange(communication_channel, 200); + Assert(vector_is_ghosted == false, + ExcMessage("Cannot call compress() on a ghosted vector")); + +#ifdef DEAL_II_WITH_MPI + // make this function thread safe + std::lock_guard lock(mutex); + + // allocate import_data in case it is not set up yet + if (partitioner->n_import_indices() > 0) + { +# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + if (std::is_same::value) + { + if (import_data.values_dev == nullptr) + import_data.values_dev.reset( + Utilities::CUDA::allocate_device_data( + partitioner->n_import_indices())); + } + else +# endif + { +# if !defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + static_assert( + std::is_same::value, + "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!"); +# endif + if (import_data.values == nullptr) + { + Number *new_val; + Utilities::System::posix_memalign( + reinterpret_cast(&new_val), + 64, + sizeof(Number) * partitioner->n_import_indices()); + import_data.values.reset(new_val); + } + } + } + +# if defined 
DEAL_II_COMPILER_CUDA_AWARE && \ + !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + if (std::is_same::value) + { + // Move the data to the host and then move it back to the + // device. We use values to store the elements because the function + // uses a view of the array and thus we need the data on the host to + // outlive the scope of the function. + Number *new_val; + Utilities::System::posix_memalign(reinterpret_cast(&new_val), + 64, + sizeof(Number) * allocated_size); + + data.values.reset(new_val); + + cudaError_t cuda_error_code = + cudaMemcpy(data.values.get(), + data.values_dev.get(), + allocated_size * sizeof(Number), + cudaMemcpyDeviceToHost); + AssertCuda(cuda_error_code); + } +# endif + +# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + if (std::is_same::value) + { + partitioner->import_from_ghosted_array_start( + operation, + communication_channel, + ArrayView( + data.values_dev.get() + partitioner->local_size(), + partitioner->n_ghost_indices()), + ArrayView( + import_data.values_dev.get(), partitioner->n_import_indices()), + compress_requests); + } + else +# endif + { + partitioner->import_from_ghosted_array_start( + operation, + communication_channel, + ArrayView( + data.values.get() + partitioner->local_size(), + partitioner->n_ghost_indices()), + ArrayView( + import_data.values.get(), partitioner->n_import_indices()), + compress_requests); + } +#else + (void)communication_channel; + (void)operation; +#endif + } + + + + template + void + Vector::compress_finish( + ::dealii::VectorOperation::values operation) + { +#ifdef DEAL_II_WITH_MPI + vector_is_ghosted = false; + + // in order to zero ghost part of the vector, we need to call + // import_from_ghosted_array_finish() regardless of + // compress_requests.size() == 0 + + // make this function thread safe + std::lock_guard lock(mutex); +# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + if (std::is_same::value) + { + Assert(partitioner->n_import_indices() == 0 || + import_data.values_dev != nullptr, + ExcNotInitialized()); + partitioner + ->import_from_ghosted_array_finish( + operation, + ArrayView( + import_data.values_dev.get(), partitioner->n_import_indices()), + ArrayView(data.values_dev.get(), + partitioner->local_size()), + ArrayView( + data.values_dev.get() + partitioner->local_size(), + partitioner->n_ghost_indices()), + compress_requests); + } + else +# endif + { + Assert(partitioner->n_import_indices() == 0 || + import_data.values != nullptr, + ExcNotInitialized()); + partitioner + ->import_from_ghosted_array_finish( + operation, + ArrayView( + import_data.values.get(), partitioner->n_import_indices()), + ArrayView(data.values.get(), + partitioner->local_size()), + ArrayView( + data.values.get() + partitioner->local_size(), + partitioner->n_ghost_indices()), + compress_requests); + } + +# if defined DEAL_II_COMPILER_CUDA_AWARE && \ + !defined DEAL_II_MPI_WITH_CUDA_SUPPORT + // The communication is done on the host, so we need to + // move the data back to the device. 
+ if (std::is_same::value) + { + cudaError_t cuda_error_code = + cudaMemcpy(data.values_dev.get(), + data.values.get(), + allocated_size * sizeof(Number), + cudaMemcpyHostToDevice); + AssertCuda(cuda_error_code); + + data.values.reset(); + } +# endif +#else + (void)operation; +#endif + } + + + + template + void + Vector::update_ghost_values_start( + const unsigned int communication_channel) const + { + AssertIndexRange(communication_channel, 200); +#ifdef DEAL_II_WITH_MPI + // nothing to do when we neither have import nor ghost indices. + if (partitioner->n_ghost_indices() == 0 && + partitioner->n_import_indices() == 0) + return; + + // make this function thread safe + std::lock_guard lock(mutex); + + // allocate import_data in case it is not set up yet + if (partitioner->n_import_indices() > 0) + { +# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + Assert( + (std::is_same::value), + ExcMessage( + "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!")); + if (import_data.values_dev == nullptr) + import_data.values_dev.reset( + Utilities::CUDA::allocate_device_data( + partitioner->n_import_indices())); +# else +# ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT + static_assert( + std::is_same::value, + "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!"); +# endif + if (import_data.values == nullptr) + { + Number *new_val; + Utilities::System::posix_memalign( + reinterpret_cast(&new_val), + 64, + sizeof(Number) * partitioner->n_import_indices()); + import_data.values.reset(new_val); + } +# endif + } + +# if defined DEAL_II_COMPILER_CUDA_AWARE && \ + !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) + // Move the data to the host and then move it back to the + // device. We use values to store the elements because the function + // uses a view of the array and thus we need the data on the host to + // outlive the scope of the function. + Number *new_val; + Utilities::System::posix_memalign(reinterpret_cast(&new_val), + 64, + sizeof(Number) * allocated_size); + + data.values.reset(new_val); + + cudaError_t cuda_error_code = cudaMemcpy(data.values.get(), + data.values_dev.get(), + allocated_size * sizeof(Number), + cudaMemcpyDeviceToHost); + AssertCuda(cuda_error_code); +# endif + +# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)) + partitioner->export_to_ghosted_array_start( + communication_channel, + ArrayView(data.values.get(), + partitioner->local_size()), + ArrayView(import_data.values.get(), + partitioner->n_import_indices()), + ArrayView(data.values.get() + + partitioner->local_size(), + partitioner->n_ghost_indices()), + update_ghost_values_requests); +# else + partitioner->export_to_ghosted_array_start( + communication_channel, + ArrayView(data.values_dev.get(), + partitioner->local_size()), + ArrayView(import_data.values_dev.get(), + partitioner->n_import_indices()), + ArrayView(data.values_dev.get() + + partitioner->local_size(), + partitioner->n_ghost_indices()), + update_ghost_values_requests); +# endif + +#else + (void)communication_channel; +#endif + } + + + + template + void + Vector::update_ghost_values_finish() const + { +#ifdef DEAL_II_WITH_MPI + // wait for both sends and receives to complete, even though only + // receives are really necessary. 
this gives (much) better performance + AssertDimension(partitioner->ghost_targets().size() + + partitioner->import_targets().size(), + update_ghost_values_requests.size()); + if (update_ghost_values_requests.size() > 0) + { + // make this function thread safe + std::lock_guard lock(mutex); + +# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \ + defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)) + partitioner->export_to_ghosted_array_finish( + ArrayView( + data.values.get() + partitioner->local_size(), + partitioner->n_ghost_indices()), + update_ghost_values_requests); +# else + partitioner->export_to_ghosted_array_finish( + ArrayView( + data.values_dev.get() + partitioner->local_size(), + partitioner->n_ghost_indices()), + update_ghost_values_requests); +# endif + } + +# if defined DEAL_II_COMPILER_CUDA_AWARE && \ + !defined DEAL_II_MPI_WITH_CUDA_SUPPORT + // The communication is done on the host, so we need to + // move the data back to the device. + if (std::is_same::value) + { + cudaError_t cuda_error_code = + cudaMemcpy(data.values_dev.get() + partitioner->local_size(), + data.values.get() + partitioner->local_size(), + partitioner->n_ghost_indices() * sizeof(Number), + cudaMemcpyHostToDevice); + AssertCuda(cuda_error_code); + + data.values.reset(); + } +# endif + +#endif + vector_is_ghosted = true; + } + + + + template + void + Vector::import( + const ReadWriteVector & V, + VectorOperation::values operation, + std::shared_ptr communication_pattern) + { + // If no communication pattern is given, create one. Otherwise, use the + // given one. + std::shared_ptr comm_pattern; + if (communication_pattern.get() == nullptr) + { + // Split the IndexSet of V in locally owned elements and ghost indices + // then create the communication pattern + IndexSet locally_owned_elem = locally_owned_elements(); + IndexSet ghost_indices = V.get_stored_elements(); + ghost_indices.subtract_set(locally_owned_elem); + comm_pattern = std::make_shared( + locally_owned_elem, ghost_indices, get_mpi_communicator()); + } + else + { + comm_pattern = + std::dynamic_pointer_cast( + communication_pattern); + AssertThrow(comm_pattern != nullptr, + ExcMessage("The communication pattern is not of type " + "Utilities::MPI::Partitioner.")); + } + Vector tmp_vector(comm_pattern); + + data.copy_to(tmp_vector.begin(), local_size()); + + // fill entries from ReadWriteVector into the SharedMPI vector, + // including ghost entries. 
this is not really efficient right now + // because indices are translated twice, once by nth_index_in_set(i) and + // once for operator() of tmp_vector + const IndexSet &v_stored = V.get_stored_elements(); + const size_type v_n_elements = v_stored.n_elements(); + switch (operation) + { + case VectorOperation::insert: + { + for (size_type i = 0; i < v_n_elements; ++i) + tmp_vector(v_stored.nth_index_in_set(i)) = V.local_element(i); + + break; + } + case VectorOperation::add: + { + for (size_type i = 0; i < v_n_elements; ++i) + tmp_vector(v_stored.nth_index_in_set(i)) += V.local_element(i); + + break; + } + case VectorOperation::min: + { + for (size_type i = 0; i < v_n_elements; ++i) + tmp_vector(v_stored.nth_index_in_set(i)) = + internal::get_min(tmp_vector(v_stored.nth_index_in_set(i)), + V.local_element(i)); + + break; + } + case VectorOperation::max: + { + for (size_type i = 0; i < v_n_elements; ++i) + tmp_vector(v_stored.nth_index_in_set(i)) = + internal::get_max(tmp_vector(v_stored.nth_index_in_set(i)), + V.local_element(i)); + + break; + } + default: + { + Assert(false, ExcMessage("This operation is not supported.")); + } + } + tmp_vector.compress(operation); + + data.copy_from(tmp_vector.begin(), local_size()); + } + + template + void + Vector::swap(Vector &v) + { +#ifdef DEAL_II_WITH_MPI + +# ifdef DEBUG + if (Utilities::MPI::job_supports_mpi()) + { + // make sure that there are not outstanding requests from updating + // ghost values or compress + int flag = 1; + if (update_ghost_values_requests.size() > 0) + { + const int ierr = MPI_Testall(update_ghost_values_requests.size(), + update_ghost_values_requests.data(), + &flag, + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + Assert(flag == 1, + ExcMessage( + "MPI found unfinished update_ghost_values() requests " + "when calling swap, which is not allowed.")); + } + if (compress_requests.size() > 0) + { + const int ierr = MPI_Testall(compress_requests.size(), + compress_requests.data(), + &flag, + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + Assert(flag == 1, + ExcMessage("MPI found unfinished compress() requests " + "when calling swap, which is not allowed.")); + } + } +# endif + + std::swap(compress_requests, v.compress_requests); + std::swap(update_ghost_values_requests, v.update_ghost_values_requests); +#endif + + std::swap(partitioner, v.partitioner); + std::swap(thread_loop_partitioner, v.thread_loop_partitioner); + std::swap(allocated_size, v.allocated_size); + std::swap(data, v.data); + std::swap(import_data, v.import_data); + std::swap(vector_is_ghosted, v.vector_is_ghosted); + } + + + + template + Vector & + Vector::operator=(const Number s) + { + const size_type this_size = local_size(); + if (this_size > 0) + { + dealii::internal::VectorOperations:: + functions::set( + thread_loop_partitioner, this_size, s, data); + } + + // if we call Vector::operator=0, we want to zero out all the entries + // plus ghosts. + if (s == Number()) + zero_out_ghosts(); + + return *this; + } + + + + template + void + Vector::reinit(const VectorSpaceVector &V, + const bool omit_zeroing_entries) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert(dynamic_cast(&V) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &down_V = dynamic_cast(V); + + reinit(down_V, omit_zeroing_entries); + } + + + + template + Vector & + Vector:: + operator+=(const VectorSpaceVector &vv) + { + // Downcast. Throws an exception if invalid. 
+ using VectorType = Vector; + Assert(dynamic_cast(&vv) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + AssertDimension(local_size(), v.local_size()); + + dealii::internal::VectorOperations:: + functions::add_vector( + thread_loop_partitioner, partitioner->local_size(), v.data, data); + + if (vector_is_ghosted) + update_ghost_values(); + + return *this; + } + + + + template + Vector & + Vector:: + operator-=(const VectorSpaceVector &vv) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert(dynamic_cast(&vv) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + AssertDimension(local_size(), v.local_size()); + + dealii::internal::VectorOperations:: + functions::subtract_vector( + thread_loop_partitioner, partitioner->local_size(), v.data, data); + + if (vector_is_ghosted) + update_ghost_values(); + + return *this; + } + + + + template + void + Vector::add(const Number a) + { + AssertIsFinite(a); + + dealii::internal::VectorOperations:: + functions::add_factor( + thread_loop_partitioner, partitioner->local_size(), a, data); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::add_local( + const Number a, + const VectorSpaceVector &vv) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert(dynamic_cast(&vv) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + AssertIsFinite(a); + AssertDimension(local_size(), v.local_size()); + + // nothing to do if a is zero + if (a == Number(0.)) + return; + + dealii::internal::VectorOperations:: + functions::add_av( + thread_loop_partitioner, partitioner->local_size(), a, v.data, data); + } + + + + template + void + Vector::add(const Number a, + const VectorSpaceVector &vv) + { + add_local(a, vv); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::add(const Number a, + const VectorSpaceVector &vv, + const Number b, + const VectorSpaceVector &ww) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert(dynamic_cast(&vv) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + Assert(dynamic_cast(&ww) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &w = dynamic_cast(ww); + + AssertIsFinite(a); + AssertIsFinite(b); + + AssertDimension(local_size(), v.local_size()); + AssertDimension(local_size(), w.local_size()); + + dealii::internal::VectorOperations:: + functions::add_avpbw( + thread_loop_partitioner, + partitioner->local_size(), + a, + b, + v.data, + w.data, + data); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::add(const std::vector &indices, + const std::vector & values) + { + for (std::size_t i = 0; i < indices.size(); ++i) + { + this->operator()(indices[i]) += values[i]; + } + } + + + + template + void + Vector::sadd( + const Number x, + const Vector &v) + { + AssertIsFinite(x); + AssertDimension(local_size(), v.local_size()); + + dealii::internal::VectorOperations:: + functions::sadd_xv( + thread_loop_partitioner, partitioner->local_size(), x, v.data, data); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::sadd_local( + const Number x, + const Number a, + const VectorSpaceVector &vv) + { + // Downcast. Throws an exception if invalid. 
+ using VectorType = Vector; + Assert((dynamic_cast(&vv) != nullptr), + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + AssertIsFinite(x); + AssertIsFinite(a); + AssertDimension(local_size(), v.local_size()); + + dealii::internal::VectorOperations:: + functions::sadd_xav( + thread_loop_partitioner, + partitioner->local_size(), + x, + a, + v.data, + data); + } + + + + template + void + Vector::sadd(const Number x, + const Number a, + const VectorSpaceVector &vv) + { + sadd_local(x, a, vv); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::sadd( + const Number x, + const Number a, + const Vector &v, + const Number b, + const Vector &w) + { + AssertIsFinite(x); + AssertIsFinite(a); + AssertIsFinite(b); + + AssertDimension(local_size(), v.local_size()); + AssertDimension(local_size(), w.local_size()); + + dealii::internal::VectorOperations:: + functions::sadd_xavbw( + thread_loop_partitioner, + partitioner->local_size(), + x, + a, + b, + v.data, + w.data, + data); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + Vector & + Vector::operator*=(const Number factor) + { + AssertIsFinite(factor); + + dealii::internal::VectorOperations:: + functions::multiply_factor( + thread_loop_partitioner, partitioner->local_size(), factor, data); + + if (vector_is_ghosted) + update_ghost_values(); + + return *this; + } + + + + template + Vector & + Vector::operator/=(const Number factor) + { + operator*=(static_cast(1.) / factor); + return *this; + } + + + + template + void + Vector::scale(const VectorSpaceVector &vv) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert(dynamic_cast(&vv) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + AssertDimension(local_size(), v.local_size()); + + dealii::internal::VectorOperations:: + functions::scale( + thread_loop_partitioner, local_size(), v.data, data); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::equ(const Number a, + const VectorSpaceVector &vv) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert(dynamic_cast(&vv) != nullptr, + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + AssertIsFinite(a); + AssertDimension(local_size(), v.local_size()); + + dealii::internal::VectorOperations:: + functions::equ_au( + thread_loop_partitioner, partitioner->local_size(), a, v.data, data); + + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + void + Vector::equ( + const Number a, + const Vector &v, + const Number b, + const Vector &w) + { + AssertIsFinite(a); + AssertIsFinite(b); + + AssertDimension(local_size(), v.local_size()); + AssertDimension(local_size(), w.local_size()); + + dealii::internal::VectorOperations:: + functions::equ_aubv( + thread_loop_partitioner, + partitioner->local_size(), + a, + b, + v.data, + w.data, + data); + + if (vector_is_ghosted) + update_ghost_values(); + } + + + + template + bool + Vector::all_zero() const + { + return (linfty_norm() == 0) ? 
true : false; + } + + + + template + template + Number + Vector::inner_product_local( + const Vector &v) const + { + if (PointerComparison::equal(this, &v)) + return norm_sqr_local(); + + AssertDimension(partitioner->local_size(), v.partitioner->local_size()); + + return dealii::internal::VectorOperations:: + functions::dot( + thread_loop_partitioner, partitioner->local_size(), v.data, data); + } + + + + template + Number Vector:: + operator*(const VectorSpaceVector &vv) const + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert((dynamic_cast(&vv) != nullptr), + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + + Number local_result = inner_product_local(v); + if (partitioner->n_mpi_processes() > 1) + return Utilities::MPI::sum(local_result, + partitioner->get_mpi_communicator()); + else + return local_result; + } + + + + template + typename Vector::real_type + Vector::norm_sqr_local() const + { + real_type sum; + + + dealii::internal::VectorOperations:: + functions::norm_2( + thread_loop_partitioner, partitioner->local_size(), sum, data); + + AssertIsFinite(sum); + + return sum; + } + + + + template + Number + Vector::mean_value_local() const + { + Assert(size() != 0, ExcEmptyObject()); + + if (partitioner->local_size() == 0) + return Number(); + + Number sum = ::dealii::internal::VectorOperations:: + functions::mean_value( + thread_loop_partitioner, partitioner->local_size(), data); + + return sum / real_type(partitioner->local_size()); + } + + + + template + Number + Vector::mean_value() const + { + Number local_result = mean_value_local(); + if (partitioner->n_mpi_processes() > 1) + return Utilities::MPI::sum(local_result * static_cast( + partitioner->local_size()), + partitioner->get_mpi_communicator()) / + static_cast(partitioner->size()); + else + return local_result; + } + + + + template + typename Vector::real_type + Vector::l1_norm_local() const + { + real_type sum; + + dealii::internal::VectorOperations:: + functions::norm_1( + thread_loop_partitioner, partitioner->local_size(), sum, data); + + return sum; + } + + + + template + typename Vector::real_type + Vector::l1_norm() const + { + real_type local_result = l1_norm_local(); + if (partitioner->n_mpi_processes() > 1) + return Utilities::MPI::sum(local_result, + partitioner->get_mpi_communicator()); + else + return local_result; + } + + + + template + typename Vector::real_type + Vector::norm_sqr() const + { + real_type local_result = norm_sqr_local(); + if (partitioner->n_mpi_processes() > 1) + return Utilities::MPI::sum(local_result, + partitioner->get_mpi_communicator()); + else + return local_result; + } + + + + template + typename Vector::real_type + Vector::l2_norm() const + { + return std::sqrt(norm_sqr()); + } + + + + template + typename Vector::real_type + Vector::lp_norm_local(const real_type p) const + { + real_type sum = 0.; + + dealii::internal::VectorOperations:: + functions::norm_p( + thread_loop_partitioner, partitioner->local_size(), sum, p, data); + + return std::pow(sum, 1. 
/ p); + } + + + + template + typename Vector::real_type + Vector::lp_norm(const real_type p) const + { + const real_type local_result = lp_norm_local(p); + if (partitioner->n_mpi_processes() > 1) + return std::pow( + Utilities::MPI::sum(std::pow(local_result, p), + partitioner->get_mpi_communicator()), + static_cast(1.0 / p)); + else + return local_result; + } + + + + template + typename Vector::real_type + Vector::linfty_norm_local() const + { + real_type max = 0.; + + const size_type local_size = partitioner->local_size(); + internal::la_parallel_vector_templates_functions< + Number, + MemorySpaceType>::linfty_norm_local(data, local_size, max); + + return max; + } + + + + template + inline typename Vector::real_type + Vector::linfty_norm() const + { + const real_type local_result = linfty_norm_local(); + if (partitioner->n_mpi_processes() > 1) + return Utilities::MPI::max(local_result, + partitioner->get_mpi_communicator()); + else + return local_result; + } + + + + template + Number + Vector::add_and_dot_local( + const Number a, + const Vector &v, + const Vector &w) + { + const size_type vec_size = partitioner->local_size(); + AssertDimension(vec_size, v.local_size()); + AssertDimension(vec_size, w.local_size()); + + Number sum = dealii::internal::VectorOperations:: + functions::add_and_dot( + thread_loop_partitioner, vec_size, a, v.data, w.data, data); + + AssertIsFinite(sum); + + return sum; + } + + + + template + Number + Vector::add_and_dot( + const Number a, + const VectorSpaceVector &vv, + const VectorSpaceVector &ww) + { + // Downcast. Throws an exception if invalid. + using VectorType = Vector; + Assert((dynamic_cast(&vv) != nullptr), + ExcVectorTypeNotCompatible()); + const VectorType &v = dynamic_cast(vv); + Assert((dynamic_cast(&ww) != nullptr), + ExcVectorTypeNotCompatible()); + const VectorType &w = dynamic_cast(ww); + + Number local_result = add_and_dot_local(a, v, w); + if (partitioner->n_mpi_processes() > 1) + return Utilities::MPI::sum(local_result, + partitioner->get_mpi_communicator()); + else + return local_result; + } + + + + template + inline bool + Vector::partitioners_are_compatible( + const Utilities::MPI::Partitioner &part) const + { + return partitioner->is_compatible(part); + } + + + + template + inline bool + Vector::partitioners_are_globally_compatible( + const Utilities::MPI::Partitioner &part) const + { + return partitioner->is_globally_compatible(part); + } + + + + template + std::size_t + Vector::memory_consumption() const + { + std::size_t memory = sizeof(*this); + memory += sizeof(Number) * static_cast(allocated_size); + + // if the partitioner is shared between more processors, just count a + // fraction of that memory, since we're not actually using more memory + // for it. 
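+      // For example (illustration only): four vectors sharing a single
+      // partitioner that itself reports 1 MB would each account for roughly
+      // 256 kB of it in the sum below.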
+ if (partitioner.use_count() > 0) + memory += + partitioner->memory_consumption() / partitioner.use_count() + 1; + if (import_data.values != nullptr || import_data.values_dev != nullptr) + memory += (static_cast(partitioner->n_import_indices()) * + sizeof(Number)); + return memory; + } + + + + template + void + Vector::print(std::ostream & out, + const unsigned int precision, + const bool scientific, + const bool across) const + { + Assert(partitioner.get() != nullptr, ExcInternalError()); + AssertThrow(out, ExcIO()); + std::ios::fmtflags old_flags = out.flags(); + unsigned int old_precision = out.precision(precision); + + out.precision(precision); + if (scientific) + out.setf(std::ios::scientific, std::ios::floatfield); + else + out.setf(std::ios::fixed, std::ios::floatfield); + + // to make the vector write out all the information in order, use as + // many barriers as there are processors and start writing when it's our + // turn +#ifdef DEAL_II_WITH_MPI + if (partitioner->n_mpi_processes() > 1) + for (unsigned int i = 0; i < partitioner->this_mpi_process(); i++) + { + const int ierr = MPI_Barrier(partitioner->get_mpi_communicator()); + AssertThrowMPI(ierr); + } +#endif + + std::vector stored_elements(allocated_size); + data.copy_to(stored_elements.data(), allocated_size); + + out << "Process #" << partitioner->this_mpi_process() << std::endl + << "Local range: [" << partitioner->local_range().first << ", " + << partitioner->local_range().second + << "), global size: " << partitioner->size() << std::endl + << "Vector data:" << std::endl; + if (across) + for (size_type i = 0; i < partitioner->local_size(); ++i) + out << stored_elements[i] << ' '; + else + for (size_type i = 0; i < partitioner->local_size(); ++i) + out << stored_elements[i] << std::endl; + out << std::endl; + + if (vector_is_ghosted) + { + out << "Ghost entries (global index / value):" << std::endl; + if (across) + for (size_type i = 0; i < partitioner->n_ghost_indices(); ++i) + out << '(' << partitioner->ghost_indices().nth_index_in_set(i) + << '/' << stored_elements[partitioner->local_size() + i] + << ") "; + else + for (size_type i = 0; i < partitioner->n_ghost_indices(); ++i) + out << '(' << partitioner->ghost_indices().nth_index_in_set(i) + << '/' << stored_elements[partitioner->local_size() + i] + << ")" << std::endl; + out << std::endl; + } + out << std::flush; + +#ifdef DEAL_II_WITH_MPI + if (partitioner->n_mpi_processes() > 1) + { + int ierr = MPI_Barrier(partitioner->get_mpi_communicator()); + AssertThrowMPI(ierr); + + for (unsigned int i = partitioner->this_mpi_process() + 1; + i < partitioner->n_mpi_processes(); + i++) + { + ierr = MPI_Barrier(partitioner->get_mpi_communicator()); + AssertThrowMPI(ierr); + } + } +#endif + + AssertThrow(out, ExcIO()); + // reset output format + out.flags(old_flags); + out.precision(old_precision); + } + + } // end of namespace SharedMPI +} // end of namespace LinearAlgebra + + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt index 7d0862977a..cb43b3e2b7 100644 --- a/source/lac/CMakeLists.txt +++ b/source/lac/CMakeLists.txt @@ -30,6 +30,7 @@ SET(_unity_include_src la_vector.cc la_parallel_vector.cc la_parallel_block_vector.cc + la_sm_vector.cc matrix_lib.cc matrix_out.cc precondition_block.cc @@ -80,6 +81,7 @@ SET(_inst la_vector.inst.in la_parallel_vector.inst.in la_parallel_block_vector.inst.in + la_sm_vector.inst.in precondition_block.inst.in relaxation_block.inst.in read_write_vector.inst.in diff --git 
a/source/lac/la_sm_vector.cc b/source/lac/la_sm_vector.cc new file mode 100644 index 0000000000..28d04e2dc3 --- /dev/null +++ b/source/lac/la_sm_vector.cc @@ -0,0 +1,49 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#include +#include + +DEAL_II_NAMESPACE_OPEN + +#include "la_sm_vector.inst" + +// do a few functions that currently don't fit the scheme because they have +// two template arguments that need to be different (the case of same +// arguments is covered by the default copy constructor and copy operator that +// is declared separately) + +namespace LinearAlgebra +{ + namespace SharedMPI + { +#define TEMPL_COPY_CONSTRUCTOR(S1, S2) \ + template Vector \ + &Vector::operator= \ + (const Vector &) + + TEMPL_COPY_CONSTRUCTOR(double, float); + TEMPL_COPY_CONSTRUCTOR(float, double); +#ifdef DEAL_II_WITH_COMPLEX_VALUES + TEMPL_COPY_CONSTRUCTOR(std::complex, std::complex); + TEMPL_COPY_CONSTRUCTOR(std::complex, std::complex); +#endif + +#undef TEMPL_COPY_CONSTRUCTOR + } // namespace SharedMPI +} // namespace LinearAlgebra + + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/lac/la_sm_vector.inst.in b/source/lac/la_sm_vector.inst.in new file mode 100644 index 0000000000..d58cdb2837 --- /dev/null +++ b/source/lac/la_sm_vector.inst.in @@ -0,0 +1,73 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2011 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + + +for (SCALAR : REAL_AND_COMPLEX_SCALARS) + { + namespace LinearAlgebra + \{ + namespace SharedMPI + \{ + template class Vector; + template void + Vector::import< + ::dealii::MemorySpace::Host>( + const Vector &, + VectorOperation::values); + \} + \} + } + +for (S1 : REAL_AND_COMPLEX_SCALARS; S2 : REAL_SCALARS) + { + namespace LinearAlgebra + \{ + namespace SharedMPI + \{ + template void + Vector::reinit( + const Vector &, + const bool); + template S1 + Vector::inner_product_local( + const Vector &) const; + template void + Vector::copy_locally_owned_data_from< + S2>(const Vector &); + \} + \} + } + + +for (S1, S2 : COMPLEX_SCALARS) + { + namespace LinearAlgebra + \{ + namespace SharedMPI + \{ + template void + Vector::reinit( + const Vector &, + const bool); + template S1 + Vector::inner_product_local( + const Vector &) const; + template void + Vector::copy_locally_owned_data_from< + S2>(const Vector &); + \} + \} + } -- 2.39.5