#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
#include <deal.II/lac/matrix_block.h>
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparse_matrix_ez.h>
#include <deal.II/lac/sparsity_pattern.h>
#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/block_sparsity_pattern.h>
-# include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-# include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+# include <deal.II/lac/petsc_block_sparse_matrix.h>
# include <deal.II/lac/petsc_precondition.h>
# include <deal.II/lac/petsc_solver.h>
+# include <deal.II/lac/petsc_sparse_matrix.h>
DEAL_II_NAMESPACE_OPEN
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/lapack_support.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/exceptions.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/read_write_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector_operations_internal.h>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_block_sparse_matrix_h
+#define dealii_petsc_block_sparse_matrix_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_PETSC
+
+# include <deal.II/base/table.h>
+
+# include <deal.II/lac/block_matrix_base.h>
+# include <deal.II/lac/block_sparsity_pattern.h>
+# include <deal.II/lac/exceptions.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_sparse_matrix.h>
+
+# include <cmath>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+namespace PETScWrappers
+{
+ namespace MPI
+ {
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the PETScWrappers::MPI::SparseMatrix
+ * class. This class implements the functions that are specific to the
+     * PETSc SparseMatrix base objects for a blocked sparse matrix, and
+     * leaves most of the actual work of relaying calls to the individual
+     * blocks to the functions implemented in the base class. See there also
+     * for a description of when this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices
+ * do not have external objects for the sparsity patterns. Thus, one does
+ * not determine the size of the individual blocks of a block matrix of
+ * this type by attaching a block sparsity pattern, but by calling
+ * reinit() to set the number of blocks and then by setting the size of
+ * each block separately. In order to fix the data structures of the block
+ * matrix, it is then necessary to let it know that we have changed the
+ * sizes of the underlying matrices. For this, one has to call the
+ * collect_sizes() function, for much the same reason as is documented
+ * with the BlockSparsityPattern class.
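+     *
+     * As a purely illustrative sketch (the index sets, block count, and
+     * sparsity pattern below are placeholders, not prescribed by this
+     * class), the IndexSet based reinit() declared further down sets up all
+     * blocks from a BlockDynamicSparsityPattern in a single call:
+     * @code
+     * // two blocks of 100 rows each, all owned by the current process in
+     * // this (effectively serial) toy setting
+     * IndexSet owned(100);
+     * owned.add_range(0, 100);
+     * const std::vector<IndexSet> partitioning(2, owned);
+     *
+     * // a block sparsity pattern describing the couplings; left empty here,
+     * // a real program would fill it from its DoFHandler
+     * BlockDynamicSparsityPattern dsp(partitioning);
+     *
+     * PETScWrappers::MPI::BlockSparseMatrix matrix;
+     * matrix.reinit(partitioning, partitioning, dsp, MPI_COMM_WORLD);
+     * @endcode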
+ *
+ * @ingroup Matrix1 @see
+ * @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
+ class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
+ {
+ public:
+ /**
+ * Typedef the base class for simpler access to its own alias.
+ */
+ using BaseClass = BlockMatrixBase<SparseMatrix>;
+
+ /**
+ * Typedef the type of the underlying matrix.
+ */
+ using BlockType = BaseClass::BlockType;
+
+ /**
+ * Import the alias from the base class.
+ */
+ using value_type = BaseClass::value_type;
+ using pointer = BaseClass::pointer;
+ using const_pointer = BaseClass::const_pointer;
+ using reference = BaseClass::reference;
+ using const_reference = BaseClass::const_reference;
+ using size_type = BaseClass::size_type;
+ using iterator = BaseClass::iterator;
+ using const_iterator = BaseClass::const_iterator;
+
+ /**
+ * Constructor; initializes the matrix to be empty, without any
+ * structure, i.e. the matrix is not usable at all. This constructor is
+ * therefore only useful for matrices which are members of a class. All
+ * other matrices should be created at a point in the data flow where
+ * all necessary information is available.
+ *
+ * You have to initialize the matrix before usage with
+ * reinit(BlockSparsityPattern). The number of blocks per row and column
+ * are then determined by that function.
+ */
+ BlockSparseMatrix() = default;
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix() override = default;
+
+ /**
+ * Pseudo copy operator only copying empty objects. The sizes of the
+ * block matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator=(const BlockSparseMatrix &);
+
+ /**
+       * This operator assigns a scalar to a matrix. Since this usually does
+       * not make much sense (should we set all matrix entries to this value?
+       * Only the nonzero entries of the sparsity pattern?), this operation is
+       * only allowed if the actual value to be assigned is zero. This
+       * operator only exists to allow for the obvious notation
+       * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
+       * keeps the sparsity pattern previously used.
+ */
+ BlockSparseMatrix &
+ operator=(const double d);
+
+ /**
+ * Resize the matrix, by setting the number of block rows and columns.
+ * This deletes all blocks and replaces them with uninitialized ones,
+ * i.e. ones for which also the sizes are not yet set. You have to do
+ * that by calling the @p reinit functions of the blocks themselves. Do
+ * not forget to call collect_sizes() after that on this object.
+ *
+ * The reason that you have to set sizes of the blocks yourself is that
+ * the sizes may be varying, the maximum number of elements per row may
+ * be varying, etc. It is simpler not to reproduce the interface of the
+ * SparsityPattern class here but rather let the user call whatever
+ * function she desires.
+ */
+ void
+ reinit(const size_type n_block_rows, const size_type n_block_columns);
+
+
+ /**
+ * Efficiently reinit the block matrix for a parallel computation. Only
+ * the BlockSparsityPattern of the Simple type can efficiently store
+ * large sparsity patterns in parallel, so this is the only supported
+ * argument. The IndexSets describe the locally owned range of DoFs for
+       * each block. Note that the IndexSets need to be ascending and 1:1.
+       * For a symmetric structure, hand in the same vector for the first
+       * two arguments.
+ */
+ void
+ reinit(const std::vector<IndexSet> & rows,
+ const std::vector<IndexSet> & cols,
+ const BlockDynamicSparsityPattern &bdsp,
+ const MPI_Comm & com);
+
+
+ /**
+ * Same as above but for a symmetric structure only.
+ */
+ void
+ reinit(const std::vector<IndexSet> & sizes,
+ const BlockDynamicSparsityPattern &bdsp,
+ const MPI_Comm & com);
+
+
+
+ /**
+ * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this
+ * matrix.
+ */
+ void
+ vmult(BlockVector &dst, const BlockVector &src) const;
+
+ /**
+ * Matrix-vector multiplication. Just like the previous function, but
+ * only applicable if the matrix has only one block column.
+ */
+ void
+ vmult(BlockVector &dst, const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication. Just like the previous function, but
+ * only applicable if the matrix has only one block row.
+ */
+ void
+ vmult(Vector &dst, const BlockVector &src) const;
+
+ /**
+ * Matrix-vector multiplication. Just like the previous function, but
+ * only applicable if the matrix has only one block.
+ */
+ void
+ vmult(Vector &dst, const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this
+ * matrix. This function does the same as vmult() but takes the
+ * transposed matrix.
+ */
+ void
+ Tvmult(BlockVector &dst, const BlockVector &src) const;
+
+ /**
+ * Matrix-vector multiplication. Just like the previous function, but
+ * only applicable if the matrix has only one block row.
+ */
+ void
+ Tvmult(BlockVector &dst, const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication. Just like the previous function, but
+ * only applicable if the matrix has only one block column.
+ */
+ void
+ Tvmult(Vector &dst, const BlockVector &src) const;
+
+ /**
+ * Matrix-vector multiplication. Just like the previous function, but
+ * only applicable if the matrix has only one block.
+ */
+ void
+ Tvmult(Vector &dst, const Vector &src) const;
+
+ /**
+ * This function collects the sizes of the sub-objects and stores them
+ * in internal arrays, in order to be able to relay global indices into
+ * the matrix to indices into the subobjects. You *must* call this
+       * function each time after you have changed the size of the
+       * sub-objects.
+ */
+ void
+ collect_sizes();
+
+ /**
+ * Return the partitioning of the domain space of this matrix, i.e., the
+ * partitioning of the vectors this matrix has to be multiplied with.
+ */
+ std::vector<IndexSet>
+ locally_owned_domain_indices() const;
+
+ /**
+ * Return the partitioning of the range space of this matrix, i.e., the
+       * partitioning of the vectors that result from matrix-vector
+ * products.
+ */
+ std::vector<IndexSet>
+ locally_owned_range_indices() const;
+
+ /**
+ * Return a reference to the MPI communicator object in use with this
+ * matrix.
+ */
+ const MPI_Comm &
+ get_mpi_communicator() const;
+
+ /**
+ * Make the clear() function in the base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
+ };
+
+
+
+ /*@}*/
+
+ // ------------- inline and template functions -----------------
+
+ inline BlockSparseMatrix &
+ BlockSparseMatrix::operator=(const double d)
+ {
+ Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
+
+ for (size_type r = 0; r < this->n_block_rows(); ++r)
+ for (size_type c = 0; c < this->n_block_cols(); ++c)
+ this->block(r, c) = d;
+
+ return *this;
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::vmult(BlockVector &dst, const BlockVector &src) const
+ {
+ BaseClass::vmult_block_block(dst, src);
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::vmult(BlockVector &dst, const Vector &src) const
+ {
+ BaseClass::vmult_block_nonblock(dst, src);
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::vmult(Vector &dst, const BlockVector &src) const
+ {
+ BaseClass::vmult_nonblock_block(dst, src);
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::vmult(Vector &dst, const Vector &src) const
+ {
+ BaseClass::vmult_nonblock_nonblock(dst, src);
+ }
+
+
+ inline void
+ BlockSparseMatrix::Tvmult(BlockVector &dst, const BlockVector &src) const
+ {
+ BaseClass::Tvmult_block_block(dst, src);
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::Tvmult(BlockVector &dst, const Vector &src) const
+ {
+ BaseClass::Tvmult_block_nonblock(dst, src);
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::Tvmult(Vector &dst, const BlockVector &src) const
+ {
+ BaseClass::Tvmult_nonblock_block(dst, src);
+ }
+
+
+
+ inline void
+ BlockSparseMatrix::Tvmult(Vector &dst, const Vector &src) const
+ {
+ BaseClass::Tvmult_nonblock_nonblock(dst, src);
+ }
+
+ } // namespace MPI
+
+} // namespace PETScWrappers
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+
+#endif // DEAL_II_WITH_PETSC
+
+#endif // dealii_petsc_block_sparse_matrix_h
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_block_vector_h
+#define dealii_petsc_block_vector_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_PETSC
+
+# include <deal.II/lac/block_indices.h>
+# include <deal.II/lac/block_vector_base.h>
+# include <deal.II/lac/exceptions.h>
+# include <deal.II/lac/petsc_vector.h>
+# include <deal.II/lac/vector_type_traits.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace PETScWrappers
+{
+ // forward declaration
+ class BlockVector;
+
+ namespace MPI
+ {
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * An implementation of block vectors based on the parallel vector class
+ * implemented in PETScWrappers. While the base class provides for most of
+ * the interface, this class handles the actual allocation of vectors and
+ * provides functions that are specific to the underlying vector type.
+ *
+ * The model of distribution of data is such that each of the blocks is
+ * distributed across all MPI processes named in the MPI communicator.
+ * I.e. we don't just distribute the whole vector, but each component. In
+ * the constructors and reinit() functions, one therefore not only has to
+ * specify the sizes of the individual blocks, but also the number of
+ * elements of each of these blocks to be stored on the local process.
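+     *
+     * As a purely illustrative sketch (the index sets and sizes below are
+     * placeholders, not prescribed by this class), a vector with two blocks
+     * can be set up from the locally owned index set of each block:
+     * @code
+     * // 100 entries per block, all owned by the current process in this
+     * // (effectively serial) toy setting
+     * IndexSet owned(100);
+     * owned.add_range(0, 100);
+     * const std::vector<IndexSet> partitioning(2, owned);
+     *
+     * PETScWrappers::MPI::BlockVector v(partitioning, MPI_COMM_WORLD);
+     * v = 0.;   // not ghosted, so write access is allowed
+     * @endcode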
+ *
+ * @ingroup Vectors @see
+ * @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
+ class BlockVector : public BlockVectorBase<Vector>
+ {
+ public:
+ /**
+ * Typedef the base class for simpler access to its own alias.
+ */
+ using BaseClass = BlockVectorBase<Vector>;
+
+ /**
+ * Typedef the type of the underlying vector.
+ */
+ using BlockType = BaseClass::BlockType;
+
+ /**
+ * Import the alias from the base class.
+ */
+ using value_type = BaseClass::value_type;
+ using pointer = BaseClass::pointer;
+ using const_pointer = BaseClass::const_pointer;
+ using reference = BaseClass::reference;
+ using const_reference = BaseClass::const_reference;
+ using size_type = BaseClass::size_type;
+ using iterator = BaseClass::iterator;
+ using const_iterator = BaseClass::const_iterator;
+
+ /**
+ * Default constructor. Generate an empty vector without any blocks.
+ */
+ BlockVector() = default;
+
+ /**
+ * Constructor. Generate a block vector with @p n_blocks blocks, each of
+ * which is a parallel vector across @p communicator with @p block_size
+       * elements, of which @p local_size elements are stored on the present
+ * process.
+ */
+ explicit BlockVector(const unsigned int n_blocks,
+ const MPI_Comm & communicator,
+ const size_type block_size,
+ const size_type local_size);
+
+ /**
+ * Copy constructor. Set all the properties of the parallel vector to
+ * those of the given argument and copy the elements.
+ */
+ BlockVector(const BlockVector &V);
+
+ /**
+ * Constructor. Set the number of blocks to <tt>block_sizes.size()</tt>
+ * and initialize each block with <tt>block_sizes[i]</tt> zero elements.
+ * The individual blocks are distributed across the given communicator,
+       * and each stores <tt>local_elements[i]</tt> elements on the present
+ * process.
+ */
+ BlockVector(const std::vector<size_type> &block_sizes,
+ const MPI_Comm & communicator,
+ const std::vector<size_type> &local_elements);
+
+ /**
+ * Create a BlockVector with parallel_partitioning.size() blocks, each
+ * initialized with the given IndexSet.
+ */
+      explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+       * Same as above, but including ghost elements.
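+       *
+       * As a purely illustrative sketch (the index sets and sizes below are
+       * placeholders): each process owns a contiguous chunk of every block
+       * and, wastefully but legitimately, ghosts the complete index range.
+       * A vector created this way is treated as a ghosted, and therefore
+       * read-only, vector; see has_ghost_elements().
+       * @code
+       * // Utilities::MPI is declared in deal.II/base/mpi.h
+       * const unsigned int my_rank =
+       *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+       * const unsigned int n_ranks =
+       *   Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+       *
+       * IndexSet owned(100 * n_ranks);
+       * owned.add_range(100 * my_rank, 100 * (my_rank + 1));
+       *
+       * IndexSet relevant(100 * n_ranks);
+       * relevant.add_range(0, 100 * n_ranks);
+       *
+       * PETScWrappers::MPI::BlockVector v(std::vector<IndexSet>(2, owned),
+       *                                   std::vector<IndexSet>(2, relevant),
+       *                                   MPI_COMM_WORLD);
+       * @endcode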
+ */
+      BlockVector(const std::vector<IndexSet> &parallel_partitioning,
+ const std::vector<IndexSet> &ghost_indices,
+ const MPI_Comm & communicator);
+
+
+
+ /**
+       * Destructor. Clears memory.
+ */
+ ~BlockVector() override = default;
+
+ /**
+ * Copy operator: fill all components of the vector that are locally
+ * stored with the given scalar value.
+ */
+ BlockVector &
+ operator=(const value_type s);
+
+ /**
+ * Copy operator for arguments of the same type.
+ */
+ BlockVector &
+ operator=(const BlockVector &V);
+
+ /**
+ * Reinitialize the BlockVector to contain @p n_blocks of size @p
+ * block_size, each of which stores @p local_size elements locally. The
+       * @p communicator argument denotes the MPI communicator over which
+       * each of these blocks shall communicate.
+ *
+ * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
+ * zeros.
+ */
+ void
+ reinit(const unsigned int n_blocks,
+ const MPI_Comm & communicator,
+ const size_type block_size,
+ const size_type local_size,
+ const bool omit_zeroing_entries = false);
+
+ /**
+ * Reinitialize the BlockVector such that it contains
+ * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
+ * dimension <tt>block_sizes[i]</tt>. Each of them stores
+ * <tt>local_sizes[i]</tt> elements on the present process.
+ *
+ * If the number of blocks is the same as before this function was
+ * called, all vectors remain the same and reinit() is called for each
+ * vector.
+ *
+ * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
+ * zeros.
+ *
+ * Note that you must call this (or the other reinit() functions)
+ * function, rather than calling the reinit() functions of an individual
+ * block, to allow the block vector to update its caches of vector
+ * sizes. If you call reinit() of one of the blocks, then subsequent
+ * actions on this object may yield unpredictable results since they may
+ * be routed to the wrong block.
+ */
+ void
+ reinit(const std::vector<size_type> &block_sizes,
+ const MPI_Comm & communicator,
+ const std::vector<size_type> &local_sizes,
+ const bool omit_zeroing_entries = false);
+
+ /**
+ * Change the dimension to that of the vector <tt>V</tt>. The same
+ * applies as for the other reinit() function.
+ *
+ * The elements of <tt>V</tt> are not copied, i.e. this function is the
+ * same as calling <tt>reinit (V.size(), omit_zeroing_entries)</tt>.
+ *
+ * Note that you must call this (or the other reinit() functions)
+ * function, rather than calling the reinit() functions of an individual
+ * block, to allow the block vector to update its caches of vector
+ * sizes. If you call reinit() on one of the blocks, then subsequent
+ * actions on this object may yield unpredictable results since they may
+ * be routed to the wrong block.
+ */
+ void
+ reinit(const BlockVector &V, const bool omit_zeroing_entries = false);
+
+ /**
+ * Reinitialize the BlockVector using IndexSets. See the constructor
+ * with the same arguments for details.
+ */
+ void
+      reinit(const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm & communicator);
+
+ /**
+ * Same as above but include ghost entries.
+ */
+ void
+      reinit(const std::vector<IndexSet> &parallel_partitioning,
+ const std::vector<IndexSet> &ghost_entries,
+ const MPI_Comm & communicator);
+
+ /**
+ * Change the number of blocks to <tt>num_blocks</tt>. The individual
+ * blocks will get initialized with zero size, so it is assumed that the
+ * user resizes the individual blocks by herself in an appropriate way,
+ * and calls <tt>collect_sizes</tt> afterwards.
+ */
+ void
+ reinit(const unsigned int num_blocks);
+
+ /**
+       * Return whether this vector is a ghosted vector (and thus read-only).
+ */
+ bool
+ has_ghost_elements() const;
+
+ /**
+ * Return a reference to the MPI communicator object in use with this
+ * vector.
+ */
+ const MPI_Comm &
+ get_mpi_communicator() const;
+
+ /**
+ * Swap the contents of this vector and the other vector <tt>v</tt>. One
+ * could do this operation with a temporary variable and copying over
+ * the data elements, but this function is significantly more efficient
+ * since it only swaps the pointers to the data of the two vectors and
+ * therefore does not need to allocate temporary storage and move data
+ * around.
+ *
+ * Limitation: right now this function only works if both vectors have
+ * the same number of blocks. If needed, the numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to the swap() function of all C++
+ * standard containers. Also, there is a global function swap(u,v) that
+ * simply calls <tt>u.swap(v)</tt>, again in analogy to standard
+ * functions.
+ */
+ void
+ swap(BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void
+ print(std::ostream & out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcIteratorRangeDoesNotMatchVectorSize);
+ /**
+ * Exception
+ */
+ DeclException0(ExcNonMatchingBlockVectors);
+ };
+
+ /*@}*/
+
+ /*--------------------- Inline functions --------------------------------*/
+
+ inline BlockVector::BlockVector(const unsigned int n_blocks,
+ const MPI_Comm & communicator,
+ const size_type block_size,
+ const size_type local_size)
+ {
+ reinit(n_blocks, communicator, block_size, local_size);
+ }
+
+
+
+ inline BlockVector::BlockVector(
+ const std::vector<size_type> &block_sizes,
+ const MPI_Comm & communicator,
+ const std::vector<size_type> &local_elements)
+ {
+ reinit(block_sizes, communicator, local_elements, false);
+ }
+
+
+ inline BlockVector::BlockVector(const BlockVector &v)
+ : BlockVectorBase<Vector>()
+ {
+ this->components.resize(v.n_blocks());
+ this->block_indices = v.block_indices;
+
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ this->components[i] = v.components[i];
+ }
+
+ inline BlockVector::BlockVector(
+      const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm & communicator)
+ {
+ reinit(parallel_partitioning, communicator);
+ }
+
+ inline BlockVector::BlockVector(
+      const std::vector<IndexSet> &parallel_partitioning,
+ const std::vector<IndexSet> &ghost_indices,
+ const MPI_Comm & communicator)
+ {
+ reinit(parallel_partitioning, ghost_indices, communicator);
+ }
+
+ inline BlockVector &
+ BlockVector::operator=(const value_type s)
+ {
+ BaseClass::operator=(s);
+ return *this;
+ }
+
+ inline BlockVector &
+ BlockVector::operator=(const BlockVector &v)
+ {
+ // we only allow assignment to vectors with the same number of blocks
+ // or to an empty BlockVector
+ Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
+ ExcDimensionMismatch(n_blocks(), v.n_blocks()));
+
+ if (this->n_blocks() != v.n_blocks())
+ reinit(v.n_blocks());
+
+ for (size_type i = 0; i < this->n_blocks(); ++i)
+ this->components[i] = v.block(i);
+
+ collect_sizes();
+
+ return *this;
+ }
+
+
+
+ inline void
+ BlockVector::reinit(const unsigned int n_blocks,
+ const MPI_Comm & communicator,
+ const size_type block_size,
+ const size_type local_size,
+ const bool omit_zeroing_entries)
+ {
+ reinit(std::vector<size_type>(n_blocks, block_size),
+ communicator,
+ std::vector<size_type>(n_blocks, local_size),
+ omit_zeroing_entries);
+ }
+
+
+
+ inline void
+ BlockVector::reinit(const std::vector<size_type> &block_sizes,
+ const MPI_Comm & communicator,
+ const std::vector<size_type> &local_sizes,
+ const bool omit_zeroing_entries)
+ {
+ this->block_indices.reinit(block_sizes);
+ if (this->components.size() != this->n_blocks())
+ this->components.resize(this->n_blocks());
+
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ this->components[i].reinit(communicator,
+ block_sizes[i],
+ local_sizes[i],
+ omit_zeroing_entries);
+ }
+
+
+ inline void
+ BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
+ {
+ this->block_indices = v.get_block_indices();
+ if (this->components.size() != this->n_blocks())
+ this->components.resize(this->n_blocks());
+
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ block(i).reinit(v.block(i), omit_zeroing_entries);
+ }
+
+ inline void
+    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm & communicator)
+ {
+ std::vector<size_type> sizes(parallel_partitioning.size());
+ for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
+ sizes[i] = parallel_partitioning[i].size();
+
+ this->block_indices.reinit(sizes);
+ if (this->components.size() != this->n_blocks())
+ this->components.resize(this->n_blocks());
+
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ block(i).reinit(parallel_partitioning[i], communicator);
+ }
+
+ inline void
+    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
+ const std::vector<IndexSet> &ghost_entries,
+ const MPI_Comm & communicator)
+ {
+ std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
+ for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
+ sizes[i] = parallel_partitioning[i].size();
+
+ this->block_indices.reinit(sizes);
+ if (this->components.size() != this->n_blocks())
+ this->components.resize(this->n_blocks());
+
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ block(i).reinit(parallel_partitioning[i],
+ ghost_entries[i],
+ communicator);
+ }
+
+
+
+ inline const MPI_Comm &
+ BlockVector::get_mpi_communicator() const
+ {
+ return block(0).get_mpi_communicator();
+ }
+
+ inline bool
+ BlockVector::has_ghost_elements() const
+ {
+ bool ghosted = block(0).has_ghost_elements();
+# ifdef DEBUG
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
+# endif
+ return ghosted;
+ }
+
+
+ inline void
+ BlockVector::swap(BlockVector &v)
+ {
+ std::swap(this->components, v.components);
+
+ ::dealii::swap(this->block_indices, v.block_indices);
+ }
+
+
+
+ inline void
+ BlockVector::print(std::ostream & out,
+ const unsigned int precision,
+ const bool scientific,
+ const bool across) const
+ {
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ {
+ if (across)
+ out << 'C' << i << ':';
+ else
+ out << "Component " << i << std::endl;
+ this->components[i].print(out, precision, scientific, across);
+ }
+ }
+
+
+
+ /**
+ * Global function which overloads the default implementation of the C++
+ * standard library which uses a temporary object. The function simply
+ * exchanges the data of the two vectors.
+ *
+ * @relatesalso PETScWrappers::MPI::BlockVector
+ * @author Wolfgang Bangerth, 2000
+ */
+ inline void
+ swap(BlockVector &u, BlockVector &v)
+ {
+ u.swap(v);
+ }
+
+ } // namespace MPI
+
+} // namespace PETScWrappers
+
+namespace internal
+{
+ namespace LinearOperatorImplementation
+ {
+ template <typename>
+ class ReinitHelper;
+
+ /**
+ * A helper class used internally in linear_operator.h. Specialization for
+ * PETScWrappers::MPI::BlockVector.
+ */
+ template <>
+ class ReinitHelper<PETScWrappers::MPI::BlockVector>
+ {
+ public:
+ template <typename Matrix>
+ static void
+ reinit_range_vector(const Matrix & matrix,
+ PETScWrappers::MPI::BlockVector &v,
+ bool /*omit_zeroing_entries*/)
+ {
+ v.reinit(matrix.locally_owned_range_indices(),
+ matrix.get_mpi_communicator());
+ }
+
+ template <typename Matrix>
+ static void
+ reinit_domain_vector(const Matrix & matrix,
+ PETScWrappers::MPI::BlockVector &v,
+ bool /*omit_zeroing_entries*/)
+ {
+ v.reinit(matrix.locally_owned_domain_indices(),
+ matrix.get_mpi_communicator());
+ }
+ };
+
+ } // namespace LinearOperatorImplementation
+} /* namespace internal */
+
+
+/**
+ * Declare dealii::PETScWrappers::MPI::BlockVector as a distributed vector.
+ *
+ * @author Uwe Koecher, 2017
+ */
+template <>
+struct is_serial_vector<PETScWrappers::MPI::BlockVector> : std::false_type
+{};
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_WITH_PETSC
+
+#endif
# ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_matrix_base.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_vector.h>
DEAL_II_NAMESPACE_OPEN
#include <deal.II/base/config.h>
-#ifdef DEAL_II_WITH_PETSC
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
-# include <deal.II/base/table.h>
-
-# include <deal.II/lac/block_matrix_base.h>
-# include <deal.II/lac/block_sparsity_pattern.h>
-# include <deal.II/lac/exceptions.h>
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-
-# include <cmath>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-
-namespace PETScWrappers
-{
- namespace MPI
- {
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the PETScWrappers::MPI::SparseMatrix
- * class. This class implements the functions that are specific to the
- * PETSc SparseMatrix base objects for a blocked sparse matrix, and leaves
- * the actual work relaying most of the calls to the individual blocks to
- * the functions implemented in the base class. See there also for a
- * description of when this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices
- * do not have external objects for the sparsity patterns. Thus, one does
- * not determine the size of the individual blocks of a block matrix of
- * this type by attaching a block sparsity pattern, but by calling
- * reinit() to set the number of blocks and then by setting the size of
- * each block separately. In order to fix the data structures of the block
- * matrix, it is then necessary to let it know that we have changed the
- * sizes of the underlying matrices. For this, one has to call the
- * collect_sizes() function, for much the same reason as is documented
- * with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1 @see
- * @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
- class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
- {
- public:
- /**
- * Typedef the base class for simpler access to its own alias.
- */
- using BaseClass = BlockMatrixBase<SparseMatrix>;
-
- /**
- * Typedef the type of the underlying matrix.
- */
- using BlockType = BaseClass::BlockType;
-
- /**
- * Import the alias from the base class.
- */
- using value_type = BaseClass::value_type;
- using pointer = BaseClass::pointer;
- using const_pointer = BaseClass::const_pointer;
- using reference = BaseClass::reference;
- using const_reference = BaseClass::const_reference;
- using size_type = BaseClass::size_type;
- using iterator = BaseClass::iterator;
- using const_iterator = BaseClass::const_iterator;
-
- /**
- * Constructor; initializes the matrix to be empty, without any
- * structure, i.e. the matrix is not usable at all. This constructor is
- * therefore only useful for matrices which are members of a class. All
- * other matrices should be created at a point in the data flow where
- * all necessary information is available.
- *
- * You have to initialize the matrix before usage with
- * reinit(BlockSparsityPattern). The number of blocks per row and column
- * are then determined by that function.
- */
- BlockSparseMatrix() = default;
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix() override = default;
-
- /**
- * Pseudo copy operator only copying empty objects. The sizes of the
- * block matrices need to be the same.
- */
- BlockSparseMatrix &
- operator=(const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a matrix. Since this does usually
- * not make much sense (should we set all matrix entries to this value?
- * Only the nonzero entries of the sparsity pattern?), this operation is
- * only allowed if the actual value to be assigned is zero. This
- * operator only exists to allow for the obvious notation
- * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
- * keep the sparsity pattern previously used.
- */
- BlockSparseMatrix &
- operator=(const double d);
-
- /**
- * Resize the matrix, by setting the number of block rows and columns.
- * This deletes all blocks and replaces them with uninitialized ones,
- * i.e. ones for which also the sizes are not yet set. You have to do
- * that by calling the @p reinit functions of the blocks themselves. Do
- * not forget to call collect_sizes() after that on this object.
- *
- * The reason that you have to set sizes of the blocks yourself is that
- * the sizes may be varying, the maximum number of elements per row may
- * be varying, etc. It is simpler not to reproduce the interface of the
- * SparsityPattern class here but rather let the user call whatever
- * function she desires.
- */
- void
- reinit(const size_type n_block_rows, const size_type n_block_columns);
-
-
- /**
- * Efficiently reinit the block matrix for a parallel computation. Only
- * the BlockSparsityPattern of the Simple type can efficiently store
- * large sparsity patterns in parallel, so this is the only supported
- * argument. The IndexSets describe the locally owned range of DoFs for
- * each block. Note that the IndexSets needs to be ascending and 1:1.
- * For a symmetric structure hand in the same vector for the first two
- * arguments.
- */
- void
- reinit(const std::vector<IndexSet> & rows,
- const std::vector<IndexSet> & cols,
- const BlockDynamicSparsityPattern &bdsp,
- const MPI_Comm & com);
-
-
- /**
- * Same as above but for a symmetric structure only.
- */
- void
- reinit(const std::vector<IndexSet> & sizes,
- const BlockDynamicSparsityPattern &bdsp,
- const MPI_Comm & com);
-
-
-
- /**
- * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this
- * matrix.
- */
- void
- vmult(BlockVector &dst, const BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication. Just like the previous function, but
- * only applicable if the matrix has only one block column.
- */
- void
- vmult(BlockVector &dst, const Vector &src) const;
-
- /**
- * Matrix-vector multiplication. Just like the previous function, but
- * only applicable if the matrix has only one block row.
- */
- void
- vmult(Vector &dst, const BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication. Just like the previous function, but
- * only applicable if the matrix has only one block.
- */
- void
- vmult(Vector &dst, const Vector &src) const;
-
- /**
- * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this
- * matrix. This function does the same as vmult() but takes the
- * transposed matrix.
- */
- void
- Tvmult(BlockVector &dst, const BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication. Just like the previous function, but
- * only applicable if the matrix has only one block row.
- */
- void
- Tvmult(BlockVector &dst, const Vector &src) const;
-
- /**
- * Matrix-vector multiplication. Just like the previous function, but
- * only applicable if the matrix has only one block column.
- */
- void
- Tvmult(Vector &dst, const BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication. Just like the previous function, but
- * only applicable if the matrix has only one block.
- */
- void
- Tvmult(Vector &dst, const Vector &src) const;
-
- /**
- * This function collects the sizes of the sub-objects and stores them
- * in internal arrays, in order to be able to relay global indices into
- * the matrix to indices into the subobjects. You *must* call this
- * function each time after you have changed the size of the sub-
- * objects.
- */
- void
- collect_sizes();
-
- /**
- * Return the partitioning of the domain space of this matrix, i.e., the
- * partitioning of the vectors this matrix has to be multiplied with.
- */
- std::vector<IndexSet>
- locally_owned_domain_indices() const;
-
- /**
- * Return the partitioning of the range space of this matrix, i.e., the
- * partitioning of the vectors that are result from matrix-vector
- * products.
- */
- std::vector<IndexSet>
- locally_owned_range_indices() const;
-
- /**
- * Return a reference to the MPI communicator object in use with this
- * matrix.
- */
- const MPI_Comm &
- get_mpi_communicator() const;
-
- /**
- * Make the clear() function in the base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
- };
-
-
-
- /*@}*/
-
- // ------------- inline and template functions -----------------
-
- inline BlockSparseMatrix &
- BlockSparseMatrix::operator=(const double d)
- {
- Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
-
- for (size_type r = 0; r < this->n_block_rows(); ++r)
- for (size_type c = 0; c < this->n_block_cols(); ++c)
- this->block(r, c) = d;
-
- return *this;
- }
-
-
-
- inline void
- BlockSparseMatrix::vmult(BlockVector &dst, const BlockVector &src) const
- {
- BaseClass::vmult_block_block(dst, src);
- }
-
-
-
- inline void
- BlockSparseMatrix::vmult(BlockVector &dst, const Vector &src) const
- {
- BaseClass::vmult_block_nonblock(dst, src);
- }
-
-
-
- inline void
- BlockSparseMatrix::vmult(Vector &dst, const BlockVector &src) const
- {
- BaseClass::vmult_nonblock_block(dst, src);
- }
-
-
-
- inline void
- BlockSparseMatrix::vmult(Vector &dst, const Vector &src) const
- {
- BaseClass::vmult_nonblock_nonblock(dst, src);
- }
-
-
- inline void
- BlockSparseMatrix::Tvmult(BlockVector &dst, const BlockVector &src) const
- {
- BaseClass::Tvmult_block_block(dst, src);
- }
-
-
-
- inline void
- BlockSparseMatrix::Tvmult(BlockVector &dst, const Vector &src) const
- {
- BaseClass::Tvmult_block_nonblock(dst, src);
- }
-
-
-
- inline void
- BlockSparseMatrix::Tvmult(Vector &dst, const BlockVector &src) const
- {
- BaseClass::Tvmult_nonblock_block(dst, src);
- }
-
-
-
- inline void
- BlockSparseMatrix::Tvmult(Vector &dst, const Vector &src) const
- {
- BaseClass::Tvmult_nonblock_nonblock(dst, src);
- }
-
- } // namespace MPI
-
-} // namespace PETScWrappers
-
-
-DEAL_II_NAMESPACE_CLOSE
-
-
-#endif // DEAL_II_WITH_PETSC
+#pragma DEAL_II_WARNING( \
+ "This file is deprecated. Use deal.II/lac/petsc_block_sparse_matrix.h instead!")
#endif // dealii_petsc_parallel_block_sparse_matrix_h
#include <deal.II/base/config.h>
-#ifdef DEAL_II_WITH_PETSC
+#include <deal.II/lac/petsc_block_vector.h>
-# include <deal.II/lac/block_indices.h>
-# include <deal.II/lac/block_vector_base.h>
-# include <deal.II/lac/exceptions.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
-# include <deal.II/lac/vector_type_traits.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-namespace PETScWrappers
-{
- // forward declaration
- class BlockVector;
-
- namespace MPI
- {
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * An implementation of block vectors based on the parallel vector class
- * implemented in PETScWrappers. While the base class provides for most of
- * the interface, this class handles the actual allocation of vectors and
- * provides functions that are specific to the underlying vector type.
- *
- * The model of distribution of data is such that each of the blocks is
- * distributed across all MPI processes named in the MPI communicator.
- * I.e. we don't just distribute the whole vector, but each component. In
- * the constructors and reinit() functions, one therefore not only has to
- * specify the sizes of the individual blocks, but also the number of
- * elements of each of these blocks to be stored on the local process.
- *
- * @ingroup Vectors @see
- * @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
- class BlockVector : public BlockVectorBase<Vector>
- {
- public:
- /**
- * Typedef the base class for simpler access to its own alias.
- */
- using BaseClass = BlockVectorBase<Vector>;
-
- /**
- * Typedef the type of the underlying vector.
- */
- using BlockType = BaseClass::BlockType;
-
- /**
- * Import the alias from the base class.
- */
- using value_type = BaseClass::value_type;
- using pointer = BaseClass::pointer;
- using const_pointer = BaseClass::const_pointer;
- using reference = BaseClass::reference;
- using const_reference = BaseClass::const_reference;
- using size_type = BaseClass::size_type;
- using iterator = BaseClass::iterator;
- using const_iterator = BaseClass::const_iterator;
-
- /**
- * Default constructor. Generate an empty vector without any blocks.
- */
- BlockVector() = default;
-
- /**
- * Constructor. Generate a block vector with @p n_blocks blocks, each of
- * which is a parallel vector across @p communicator with @p block_size
- * elements of which @p local_size elements are stored on the present
- * process.
- */
- explicit BlockVector(const unsigned int n_blocks,
- const MPI_Comm & communicator,
- const size_type block_size,
- const size_type local_size);
-
- /**
- * Copy constructor. Set all the properties of the parallel vector to
- * those of the given argument and copy the elements.
- */
- BlockVector(const BlockVector &V);
-
- /**
- * Constructor. Set the number of blocks to <tt>block_sizes.size()</tt>
- * and initialize each block with <tt>block_sizes[i]</tt> zero elements.
- * The individual blocks are distributed across the given communicator,
- * and each store <tt>local_elements[i]</tt> elements on the present
- * process.
- */
- BlockVector(const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
- const std::vector<size_type> &local_elements);
-
- /**
- * Create a BlockVector with parallel_partitioning.size() blocks, each
- * initialized with the given IndexSet.
- */
-      explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Same as above, but include ghost elements
- */
-      BlockVector(const std::vector<IndexSet> &parallel_partitioning,
- const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator);
-
-
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector() override = default;
-
- /**
- * Copy operator: fill all components of the vector that are locally
- * stored with the given scalar value.
- */
- BlockVector &
- operator=(const value_type s);
-
- /**
- * Copy operator for arguments of the same type.
- */
- BlockVector &
- operator=(const BlockVector &V);
-
- /**
- * Reinitialize the BlockVector to contain @p n_blocks of size @p
- * block_size, each of which stores @p local_size elements locally. The
- * @p communicator argument denotes which MPI channel each of these
- * blocks shall communicate.
- *
- * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
- * zeros.
- */
- void
- reinit(const unsigned int n_blocks,
- const MPI_Comm & communicator,
- const size_type block_size,
- const size_type local_size,
- const bool omit_zeroing_entries = false);
-
- /**
- * Reinitialize the BlockVector such that it contains
- * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
- * dimension <tt>block_sizes[i]</tt>. Each of them stores
- * <tt>local_sizes[i]</tt> elements on the present process.
- *
- * If the number of blocks is the same as before this function was
- * called, all vectors remain the same and reinit() is called for each
- * vector.
- *
- * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
- * zeros.
- *
- * Note that you must call this (or the other reinit() functions)
- * function, rather than calling the reinit() functions of an individual
- * block, to allow the block vector to update its caches of vector
- * sizes. If you call reinit() of one of the blocks, then subsequent
- * actions on this object may yield unpredictable results since they may
- * be routed to the wrong block.
- */
- void
- reinit(const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
- const std::vector<size_type> &local_sizes,
- const bool omit_zeroing_entries = false);
-
- /**
- * Change the dimension to that of the vector <tt>V</tt>. The same
- * applies as for the other reinit() function.
- *
- * The elements of <tt>V</tt> are not copied, i.e. this function is the
- * same as calling <tt>reinit (V.size(), omit_zeroing_entries)</tt>.
- *
- * Note that you must call this (or the other reinit() functions)
- * function, rather than calling the reinit() functions of an individual
- * block, to allow the block vector to update its caches of vector
- * sizes. If you call reinit() on one of the blocks, then subsequent
- * actions on this object may yield unpredictable results since they may
- * be routed to the wrong block.
- */
- void
- reinit(const BlockVector &V, const bool omit_zeroing_entries = false);
-
- /**
- * Reinitialize the BlockVector using IndexSets. See the constructor
- * with the same arguments for details.
- */
- void
-      reinit(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator);
-
- /**
- * Same as above but include ghost entries.
- */
- void
-      reinit(const std::vector<IndexSet> &parallel_partitioning,
- const std::vector<IndexSet> &ghost_entries,
- const MPI_Comm & communicator);
-
- /**
- * Change the number of blocks to <tt>num_blocks</tt>. The individual
- * blocks will get initialized with zero size, so it is assumed that the
- * user resizes the individual blocks by herself in an appropriate way,
- * and calls <tt>collect_sizes</tt> afterwards.
- */
- void
- reinit(const unsigned int num_blocks);
-
- /**
- * Return if this vector is a ghosted vector (and thus read-only).
- */
- bool
- has_ghost_elements() const;
-
- /**
- * Return a reference to the MPI communicator object in use with this
- * vector.
- */
- const MPI_Comm &
- get_mpi_communicator() const;
-
- /**
- * Swap the contents of this vector and the other vector <tt>v</tt>. One
- * could do this operation with a temporary variable and copying over
- * the data elements, but this function is significantly more efficient
- * since it only swaps the pointers to the data of the two vectors and
- * therefore does not need to allocate temporary storage and move data
- * around.
- *
- * Limitation: right now this function only works if both vectors have
- * the same number of blocks. If needed, the numbers of blocks should be
- * exchanged, too.
- *
- * This function is analogous to the swap() function of all C++
- * standard containers. Also, there is a global function swap(u,v) that
- * simply calls <tt>u.swap(v)</tt>, again in analogy to standard
- * functions.
- */
- void
- swap(BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void
- print(std::ostream & out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0(ExcIteratorRangeDoesNotMatchVectorSize);
- /**
- * Exception
- */
- DeclException0(ExcNonMatchingBlockVectors);
- };
-
- /*@}*/
-
- /*--------------------- Inline functions --------------------------------*/
-
- inline BlockVector::BlockVector(const unsigned int n_blocks,
- const MPI_Comm & communicator,
- const size_type block_size,
- const size_type local_size)
- {
- reinit(n_blocks, communicator, block_size, local_size);
- }
-
-
-
- inline BlockVector::BlockVector(
- const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
- const std::vector<size_type> &local_elements)
- {
- reinit(block_sizes, communicator, local_elements, false);
- }
-
-
- inline BlockVector::BlockVector(const BlockVector &v)
- : BlockVectorBase<Vector>()
- {
- this->components.resize(v.n_blocks());
- this->block_indices = v.block_indices;
-
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- this->components[i] = v.components[i];
- }
-
- inline BlockVector::BlockVector(
-      const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator)
- {
- reinit(parallel_partitioning, communicator);
- }
-
- inline BlockVector::BlockVector(
-      const std::vector<IndexSet> &parallel_partitioning,
- const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator)
- {
- reinit(parallel_partitioning, ghost_indices, communicator);
- }
-
- inline BlockVector &
- BlockVector::operator=(const value_type s)
- {
- BaseClass::operator=(s);
- return *this;
- }
-
- inline BlockVector &
- BlockVector::operator=(const BlockVector &v)
- {
- // we only allow assignment to vectors with the same number of blocks
- // or to an empty BlockVector
- Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
- ExcDimensionMismatch(n_blocks(), v.n_blocks()));
-
- if (this->n_blocks() != v.n_blocks())
- reinit(v.n_blocks());
-
- for (size_type i = 0; i < this->n_blocks(); ++i)
- this->components[i] = v.block(i);
-
- collect_sizes();
-
- return *this;
- }
-
-
-
- inline void
- BlockVector::reinit(const unsigned int n_blocks,
- const MPI_Comm & communicator,
- const size_type block_size,
- const size_type local_size,
- const bool omit_zeroing_entries)
- {
- reinit(std::vector<size_type>(n_blocks, block_size),
- communicator,
- std::vector<size_type>(n_blocks, local_size),
- omit_zeroing_entries);
- }
-
-
-
- inline void
- BlockVector::reinit(const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
- const std::vector<size_type> &local_sizes,
- const bool omit_zeroing_entries)
- {
- this->block_indices.reinit(block_sizes);
- if (this->components.size() != this->n_blocks())
- this->components.resize(this->n_blocks());
-
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- this->components[i].reinit(communicator,
- block_sizes[i],
- local_sizes[i],
- omit_zeroing_entries);
- }
-
-
- inline void
- BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
- {
- this->block_indices = v.get_block_indices();
- if (this->components.size() != this->n_blocks())
- this->components.resize(this->n_blocks());
-
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- block(i).reinit(v.block(i), omit_zeroing_entries);
- }
-
- inline void
-    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator)
- {
- std::vector<size_type> sizes(parallel_partitioning.size());
- for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
- sizes[i] = parallel_partitioning[i].size();
-
- this->block_indices.reinit(sizes);
- if (this->components.size() != this->n_blocks())
- this->components.resize(this->n_blocks());
-
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- block(i).reinit(parallel_partitioning[i], communicator);
- }
-
- inline void
-    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
- const std::vector<IndexSet> &ghost_entries,
- const MPI_Comm & communicator)
- {
- std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
- for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
- sizes[i] = parallel_partitioning[i].size();
-
- this->block_indices.reinit(sizes);
- if (this->components.size() != this->n_blocks())
- this->components.resize(this->n_blocks());
-
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- block(i).reinit(parallel_partitioning[i],
- ghost_entries[i],
- communicator);
- }
-
-
-
- inline const MPI_Comm &
- BlockVector::get_mpi_communicator() const
- {
- return block(0).get_mpi_communicator();
- }
-
- inline bool
- BlockVector::has_ghost_elements() const
- {
- bool ghosted = block(0).has_ghost_elements();
-# ifdef DEBUG
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
-# endif
- return ghosted;
- }
-
-
- inline void
- BlockVector::swap(BlockVector &v)
- {
- std::swap(this->components, v.components);
-
- ::dealii::swap(this->block_indices, v.block_indices);
- }
-
-
-
- inline void
- BlockVector::print(std::ostream & out,
- const unsigned int precision,
- const bool scientific,
- const bool across) const
- {
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- {
- if (across)
- out << 'C' << i << ':';
- else
- out << "Component " << i << std::endl;
- this->components[i].print(out, precision, scientific, across);
- }
- }
-
-
-
- /**
- * Global function which overloads the default implementation of the C++
- * standard library which uses a temporary object. The function simply
- * exchanges the data of the two vectors.
- *
- * @relatesalso PETScWrappers::MPI::BlockVector
- * @author Wolfgang Bangerth, 2000
- */
- inline void
- swap(BlockVector &u, BlockVector &v)
- {
- u.swap(v);
- }
-
- } // namespace MPI
-
-} // namespace PETScWrappers
-
-namespace internal
-{
- namespace LinearOperatorImplementation
- {
- template <typename>
- class ReinitHelper;
-
- /**
- * A helper class used internally in linear_operator.h. Specialization for
- * PETScWrappers::MPI::BlockVector.
- */
- template <>
- class ReinitHelper<PETScWrappers::MPI::BlockVector>
- {
- public:
- template <typename Matrix>
- static void
- reinit_range_vector(const Matrix & matrix,
- PETScWrappers::MPI::BlockVector &v,
- bool /*omit_zeroing_entries*/)
- {
- v.reinit(matrix.locally_owned_range_indices(),
- matrix.get_mpi_communicator());
- }
-
- template <typename Matrix>
- static void
- reinit_domain_vector(const Matrix & matrix,
- PETScWrappers::MPI::BlockVector &v,
- bool /*omit_zeroing_entries*/)
- {
- v.reinit(matrix.locally_owned_domain_indices(),
- matrix.get_mpi_communicator());
- }
- };
-
- } // namespace LinearOperatorImplementation
-} /* namespace internal */
-
-
-/**
- * Declare dealii::PETScWrappers::MPI::BlockVector as distributed vector.
- *
- * @author Uwe Koecher, 2017
- */
-template <>
-struct is_serial_vector<PETScWrappers::MPI::BlockVector> : std::false_type
-{};
-
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif // DEAL_II_WITH_PETSC
+#pragma DEAL_II_WARNING( \
+ "This file is deprecated. Use deal.II/lac/petsc_block_vector.h instead!")
#endif
# include <deal.II/base/config.h>
-# ifdef DEAL_II_WITH_PETSC
+# include <deal.II/lac/petsc_sparse_matrix.h>
-# include <deal.II/lac/exceptions.h>
-# include <deal.II/lac/petsc_matrix_base.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
-
-# include <vector>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-// forward declaration
-template <typename Matrix>
-class BlockMatrixBase;
-
-
-namespace PETScWrappers
-{
- namespace MPI
- {
- /**
- * Implementation of a parallel sparse matrix class based on PETSc, with
- * rows of the matrix distributed across an MPI network. All the
- * functionality is actually in the base class, except for the calls to
- * generate a parallel sparse matrix. This is possible since PETSc only
- * works on an abstract matrix type and internally distributes to
- * functions that do the actual work depending on the actual matrix type
- * (much like using virtual functions). Only the functions creating a
- * matrix of specific type differ, and are implemented in this particular
- * class.
- *
- * There are a number of comments on the communication model as well as
- * access to individual elements in the documentation to the parallel
- * vector class. These comments apply here as well.
- *
- *
- * <h3>Partitioning of matrices</h3>
- *
- * PETSc partitions parallel matrices so that each MPI process "owns" a
- * certain number of rows (i.e. only this process stores the respective
- * entries in these rows). The number of rows each process owns has to be
- * passed to the constructors and reinit() functions via the argument @p
- * local_rows. The individual values passed as @p local_rows on all the
- * MPI processes of course have to add up to the global number of rows of
- * the matrix.
- *
- * In addition to this, PETSc also partitions the rectangular chunk of the
- * matrix it owns (i.e. the @p local_rows times n() elements in the
- * matrix), so that matrix vector multiplications can be performed
- * efficiently. This column-partitioning therefore has to match the
- * partitioning of the vectors with which the matrix is multiplied, just
- * as the row-partitioning has to match the partitioning of destination
- * vectors. This partitioning is passed to the constructors and reinit()
- * functions through the @p local_columns variable, which again has to add
- * up to the global number of columns in the matrix. The name @p
- * local_columns may be named inappropriately since it does not reflect
- * that only these columns are stored locally, but it reflects the fact
- * that these are the columns for which the elements of incoming vectors
- * are stored locally.
- *
- * To make things even more complicated, PETSc needs a very good estimate
- * of the number of elements to be stored in each row to be efficient.
- * Otherwise it spends most of the time with allocating small chunks of
- * memory, a process that can slow down programs to a crawl if it happens
- * to often. As if a good estimate of the number of entries per row isn't
- * even, it even needs to split this as follows: for each row it owns, it
- * needs an estimate for the number of elements in this row that fall into
- * the columns that are set apart for this process (see above), and the
- * number of elements that are in the rest of the columns.
- *
- * Since in general this information is not readily available, most of the
- * initializing functions of this class assume that all of the number of
- * elements you give as an argument to @p n_nonzero_per_row or by @p
- * row_lengths fall into the columns "owned" by this process, and none
- * into the other ones. This is a fair guess for most of the rows, since
- * in a good domain partitioning, nodes only interact with nodes that are
- * within the same subdomain. It does not hold for nodes on the interfaces
- * of subdomain, however, and for the rows corresponding to these nodes,
- * PETSc will have to allocate additional memory, a costly process.
- *
- * The only way to avoid this is to tell PETSc where the actual entries of
- * the matrix will be. For this, there are constructors and reinit()
- * functions of this class that take a DynamicSparsityPattern object
- * containing all this information. While in the general case it is
- * sufficient if the constructors and reinit() functions know the number
- * of local rows and columns, the functions getting a sparsity pattern
- * also need to know the number of local rows (@p local_rows_per_process)
- * and columns (@p local_columns_per_process) for all other processes, in
- * order to compute which parts of the matrix are which. Thus, it is not
- * sufficient to just count the number of degrees of freedom that belong
- * to a particular process, but you have to have the numbers for all
- * processes available at all processes.
- *
- * @ingroup PETScWrappers
- * @ingroup Matrix1
- * @author Wolfgang Bangerth, 2004
- */
- class SparseMatrix : public MatrixBase
- {
- public:
- /**
- * Declare type for container size.
- */
- using size_type = types::global_dof_index;
-
- /**
- * A structure that describes some of the traits of this class in terms
- * of its run-time behavior. Some other classes (such as the block
- * matrix classes) that take one or other of the matrix classes as its
- * template parameters can tune their behavior based on the variables in
- * this class.
- */
- struct Traits
- {
- /**
- * It is not safe to elide additions of zeros to individual elements
- * of this matrix. The reason is that additions to the matrix may
- * trigger collective operations synchronizing buffers on multiple
- * processes. If an addition is elided on one process, this may lead
- * to other processes hanging in an infinite waiting loop.
- */
- static const bool zero_addition_can_be_elided = false;
- };
-
- /**
- * Default constructor. Create an empty matrix.
- */
- SparseMatrix();
-
- /**
- * Destructor to free the PETSc object.
- */
- ~SparseMatrix() override;
-
- /**
- * Create a sparse matrix of dimensions @p m times @p n, with an initial
- * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
- * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
- * function for more information about these parameters). PETSc is able
- * to cope with the situation that more than this number of elements are
- * later allocated for a row, but this involves copying data, and is
- * thus expensive.
- *
- * For the meaning of the @p local_row and @p local_columns parameters,
- * see the class documentation.
- *
- * The @p is_symmetric flag determines whether we should tell PETSc that
- * the matrix is going to be symmetric (as indicated by the call
- * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>. Note that the PETSc
- * documentation states that one cannot form an ILU decomposition of a
- * matrix for which this flag has been set to @p true, only an ICC. The
- * default value of this flag is @p false.
- *
- * @deprecated This constructor is deprecated: please use the
- * constructor with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- SparseMatrix(const MPI_Comm &communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric = false,
- const size_type n_offdiag_nonzero_per_row = 0);
-
- /**
- * Initialize a rectangular matrix with @p m rows and @p n columns. The
- * maximal number of nonzero entries for diagonal and off- diagonal
- * blocks of each row is given by the @p row_lengths and @p
- * offdiag_row_lengths arrays.
- *
- * For the meaning of the @p local_row and @p local_columns parameters,
- * see the class documentation.
- *
- * Just as for the other constructors: PETSc is able to cope with the
- * situation that more than this number of elements are later allocated
- * for a row, but this involves copying data, and is thus expensive.
- *
- * The @p is_symmetric flag determines whether we should tell PETSc that
- * the matrix is going to be symmetric (as indicated by the call
- * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>. Note that the PETSc
- * documentation states that one cannot form an ILU decomposition of a
- * matrix for which this flag has been set to @p true, only an ICC. The
- * default value of this flag is @p false.
- *
- * @deprecated This constructor is deprecated: please use the
- * constructor with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- SparseMatrix(const MPI_Comm & communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric = false,
- const std::vector<size_type> &offdiag_row_lengths =
- std::vector<size_type>());
-
- /**
- * Initialize using the given sparsity pattern with communication
- * happening over the provided @p communicator.
- *
- * For the meaning of the @p local_rows_per_process and @p
- * local_columns_per_process parameters, see the class documentation.
- *
- * Note that PETSc can be very slow if you do not provide it with a good
- * estimate of the lengths of rows. Using the present function is a very
- * efficient way to do this, as it uses the exact number of nonzero
- * entries for each row of the matrix by using the given sparsity
- * pattern argument. If the @p preset_nonzero_locations flag is @p true,
- * this function in addition not only sets the correct row sizes up
- * front, but also pre-allocated the correct nonzero entries in the
- * matrix.
- *
- * PETsc allows to later add additional nonzero entries to a matrix, by
- * simply writing to these elements. However, this will then lead to
- * additional memory allocations which are very inefficient and will
- * greatly slow down your program. It is therefore significantly more
- * efficient to get memory allocation right from the start.
- */
- template <typename SparsityPatternType>
- SparseMatrix(const MPI_Comm & communicator,
- const SparsityPatternType & sparsity_pattern,
- const std::vector<size_type> &local_rows_per_process,
- const std::vector<size_type> &local_columns_per_process,
- const unsigned int this_process,
- const bool preset_nonzero_locations = true);
-
- /**
- * This operator assigns a scalar to a matrix. Since this does usually
- * not make much sense (should we set all matrix entries to this value?
- * Only the nonzero entries of the sparsity pattern?), this operation is
- * only allowed if the actual value to be assigned is zero. This
- * operator only exists to allow for the obvious notation
- * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
- * keep the sparsity pattern previously used.
- */
- SparseMatrix &
- operator=(const value_type d);
-
-
- /**
- * Make a copy of the PETSc matrix @p other. It is assumed that both
- * matrices have the same SparsityPattern.
- */
- void
- copy_from(const SparseMatrix &other);
-
- /**
- * Throw away the present matrix and generate one that has the same
- * properties as if it were created by the constructor of this class
- * with the same argument list as the present function.
- *
- * @deprecated This overload of <code>reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- reinit(const MPI_Comm &communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric = false,
- const size_type n_offdiag_nonzero_per_row = 0);
-
- /**
- * Throw away the present matrix and generate one that has the same
- * properties as if it were created by the constructor of this class
- * with the same argument list as the present function.
- *
- * @deprecated This overload of <code>reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- reinit(const MPI_Comm & communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric = false,
- const std::vector<size_type> &offdiag_row_lengths =
- std::vector<size_type>());
-
- /**
- * Initialize using the given sparsity pattern with communication
- * happening over the provided @p communicator.
- *
- * Note that PETSc can be very slow if you do not provide it with a good
- * estimate of the lengths of rows. Using the present function is a very
- * efficient way to do this, as it uses the exact number of nonzero
- * entries for each row of the matrix by using the given sparsity
- * pattern argument. If the @p preset_nonzero_locations flag is @p true,
- * this function in addition not only sets the correct row sizes up
- * front, but also pre-allocated the correct nonzero entries in the
- * matrix.
- *
- * PETsc allows to later add additional nonzero entries to a matrix, by
- * simply writing to these elements. However, this will then lead to
- * additional memory allocations which are very inefficient and will
- * greatly slow down your program. It is therefore significantly more
- * efficient to get memory allocation right from the start.
- */
- template <typename SparsityPatternType>
- void
- reinit(const MPI_Comm & communicator,
- const SparsityPatternType & sparsity_pattern,
- const std::vector<size_type> &local_rows_per_process,
- const std::vector<size_type> &local_columns_per_process,
- const unsigned int this_process,
- const bool preset_nonzero_locations = true);
-
- /**
- * Create a matrix where the size() of the IndexSets determine the
- * global number of rows and columns and the entries of the IndexSet
- * give the rows and columns for the calling processor. Note that only
- * ascending, 1:1 IndexSets are supported.
- */
- template <typename SparsityPatternType>
- void
- reinit(const IndexSet & local_rows,
- const IndexSet & local_columns,
- const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator);
-
- /**
- * Initialize this matrix to have the same structure as @p other. This
- * will not copy the values of the other matrix, but you can use
- * copy_from() for this.
- */
- void
- reinit(const SparseMatrix &other);
-
- /**
- * Return a reference to the MPI communicator object in use with this
- * matrix.
- */
- virtual const MPI_Comm &
- get_mpi_communicator() const override;
-
- /**
- * @addtogroup Exceptions
- * @{
- */
- /**
- * Exception
- */
- DeclException2(ExcLocalRowsTooLarge,
- int,
- int,
- << "The number of local rows " << arg1
- << " must be larger than the total number of rows "
- << arg2);
- //@}
-
- /**
- * Return the square of the norm of the vector $v$ with respect to the
- * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is
- * useful, e.g. in the finite element context, where the $L_2$ norm of a
- * function equals the matrix norm with respect to the mass matrix of
- * the vector representing the nodal values of the finite element
- * function.
- *
- * Obviously, the matrix needs to be quadratic for this operation.
- *
- * The implementation of this function is not as efficient as the one in
- * the @p MatrixBase class used in deal.II (i.e. the original one, not
- * the PETSc wrapper class) since PETSc doesn't support this operation
- * and needs a temporary vector.
- */
- PetscScalar
- matrix_norm_square(const Vector &v) const;
-
- /**
- * Compute the matrix scalar product $\left(u^\ast,Mv\right)$.
- *
- * The implementation of this function is not as efficient as the one in
- * the @p MatrixBase class used in deal.II (i.e. the original one, not
- * the PETSc wrapper class) since PETSc doesn't support this operation
- * and needs a temporary vector.
- */
- PetscScalar
- matrix_scalar_product(const Vector &u, const Vector &v) const;
-
- /**
- * Return the partitioning of the domain space of this matrix, i.e., the
- * partitioning of the vectors this matrix has to be multiplied with.
- */
- IndexSet
- locally_owned_domain_indices() const;
-
- /**
- * Return the partitioning of the range space of this matrix, i.e., the
- * partitioning of the vectors that result from matrix-vector
- * products.
- */
- IndexSet
- locally_owned_range_indices() const;
-
- /**
- * Perform the matrix-matrix multiplication $C = AB$, or,
- * $C = A \text{diag}(V) B$ given a compatible vector $V$.
- *
- * This function calls MatrixBase::mmult() to do the actual work.
- */
- void
- mmult(SparseMatrix & C,
- const SparseMatrix &B,
- const MPI::Vector & V = MPI::Vector()) const;
-
- /**
- * Perform the matrix-matrix multiplication with the transpose of
- * <tt>this</tt>, i.e., $C = A^T B$, or,
- * $C = A^T \text{diag}(V) B$ given a compatible vector $V$.
- *
- * This function calls MatrixBase::Tmmult() to do the actual work.
- */
- void
- Tmmult(SparseMatrix & C,
- const SparseMatrix &B,
- const MPI::Vector & V = MPI::Vector()) const;
-
- private:
- /**
- * Copy of the communicator object to be used for this parallel vector.
- */
- MPI_Comm communicator;
-
- /**
- * Do the actual work for the respective reinit() function and the
- * matching constructor, i.e. create a matrix. Getting rid of the
- * previous matrix is left to the caller.
- *
- * @deprecated This overload of <code>do_reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- do_reinit(const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric = false,
- const size_type n_offdiag_nonzero_per_row = 0);
-
- /**
- * Same as previous function.
- *
- * @deprecated This overload of <code>do_reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- do_reinit(const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric = false,
- const std::vector<size_type> &offdiag_row_lengths =
- std::vector<size_type>());
-
- /**
- * Same as previous functions.
- */
- template <typename SparsityPatternType>
- void
- do_reinit(const SparsityPatternType & sparsity_pattern,
- const std::vector<size_type> &local_rows_per_process,
- const std::vector<size_type> &local_columns_per_process,
- const unsigned int this_process,
- const bool preset_nonzero_locations);
-
- /**
- * Same as previous functions.
- */
- template <typename SparsityPatternType>
- void
- do_reinit(const IndexSet & local_rows,
- const IndexSet & local_columns,
- const SparsityPatternType &sparsity_pattern);
-
- /**
- * To allow calling protected prepare_add() and prepare_set().
- */
- friend class BlockMatrixBase<SparseMatrix>;
- };
-
-
-
- // -------- template and inline functions ----------
-
- inline const MPI_Comm &
- SparseMatrix::get_mpi_communicator() const
- {
- return communicator;
- }
- } // namespace MPI
-} // namespace PETScWrappers
-
-DEAL_II_NAMESPACE_CLOSE
-
-# endif // DEAL_II_WITH_PETSC
+# pragma DEAL_II_WARNING( \
+ "This file is deprecated. Use deal.II/lac/petsc_sparse_matrix.h instead.")
#endif
/*---------------------- petsc_parallel_sparse_matrix.h ---------------------*/
#ifndef dealii_petsc_parallel_vector_h
# define dealii_petsc_parallel_vector_h
-
# include <deal.II/base/config.h>
-# ifdef DEAL_II_WITH_PETSC
-
-# include <deal.II/base/index_set.h>
-# include <deal.II/base/subscriptor.h>
-
-# include <deal.II/lac/exceptions.h>
-# include <deal.II/lac/petsc_vector_base.h>
-# include <deal.II/lac/vector.h>
-# include <deal.II/lac/vector_operation.h>
-# include <deal.II/lac/vector_type_traits.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-/*! @addtogroup PETScWrappers
- *@{
- */
-namespace PETScWrappers
-{
- /**
- * Namespace for PETSc classes that work in parallel over MPI, such as
- * distributed vectors and matrices.
- *
- * @ingroup PETScWrappers
- * @author Wolfgang Bangerth, 2004
- */
- namespace MPI
- {
- /**
- * Implementation of a parallel vector class based on PETSC and using MPI
- * communication to synchronize distributed operations. All the
- * functionality is actually in the base class, except for the calls to
- * generate a parallel vector. This is possible since PETSc only works on
- * an abstract vector type and internally distributes to functions that do
- * the actual work depending on the actual vector type (much like using
- * virtual functions). Only the functions creating a vector of specific
- * type differ, and are implemented in this particular class.
- *
- *
- * <h3>Parallel communication model</h3>
- *
- * The parallel functionality of PETSc is built on top of the Message
- * Passing Interface (MPI). MPI's communication model is built on
- * collective communications: if one process wants something from another,
- * that other process has to be willing to accept this communication. A
- * process cannot query data from another process by calling a remote
- * function, without that other process expecting such a transaction. The
- * consequence is that most of the operations in the base class of this
- * class have to be called collectively. For example, if you want to
- * compute the l2 norm of a parallel vector, @em all processes across
- * which this vector is shared have to call the @p l2_norm function. If
- * you don't do this, but instead only call the @p l2_norm function on one
- * process, then the following happens: This one process will call one of
- * the collective MPI functions and wait for all the other processes to
- * join in on this. Since the other processes don't call this function,
- * you will either get a time-out on the first process, or, worse, by the
- * time the next a call to a PETSc function generates an MPI message on
- * the other processes, you will get a cryptic message that only a subset
- * of processes attempted a communication. These bugs can be very hard to
- * figure out, unless you are well-acquainted with the communication model
- * of MPI, and know which functions may generate MPI messages.
- *
- * One particular case, where an MPI message may be generated unexpectedly
- * is discussed below.
- *
- *
- * <h3>Accessing individual elements of a vector</h3>
- *
- * PETSc does allow read access to individual elements of a vector, but in
- * the distributed case only to elements that are stored locally. We
- * implement this through calls like <tt>d=vec(i)</tt>. However, if you
- * access an element outside the locally stored range, an exception is
- * generated.
- *
- * In contrast to read access, PETSc (and the respective deal.II wrapper
- * classes) allow to write (or add) to individual elements of vectors,
- * even if they are stored on a different process. You can do this
- * writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or
- * similar operations. There is one catch, however, that may lead to very
- * confusing error messages: PETSc requires application programs to call
- * the compress() function when they switch from adding, to elements to
- * writing to elements. The reasoning is that all processes might
- * accumulate addition operations to elements, even if multiple processes
- * write to the same elements. By the time we call compress() the next
- * time, all these additions are executed. However, if one process adds to
- * an element, and another overwrites to it, the order of execution would
- * yield non-deterministic behavior if we don't make sure that a
- * synchronization with compress() happens in between.
- *
- * In order to make sure these calls to compress() happen at the
- * appropriate time, the deal.II wrappers keep a state variable that store
- * which is the presently allowed operation: additions or writes. If it
- * encounters an operation of the opposite kind, it calls compress() and
- * flips the state. This can sometimes lead to very confusing behavior, in
- * code that may for example look like this:
- * @code
- * PETScWrappers::MPI::Vector vector;
- * ...
- * // do some write operations on the vector
- * for (unsigned int i=0; i<vector.size(); ++i)
- * vector(i) = i;
- *
- * // do some additions to vector elements, but only for some elements
- * for (unsigned int i=0; i<vector.size(); ++i)
- * if (some_condition(i) == true)
- * vector(i) += 1;
- *
- * // do another collective operation
- * const double norm = vector.l2_norm();
- * @endcode
- *
- * This code can run into trouble: by the time we see the first addition
- * operation, we need to flush the overwrite buffers for the vector, and
- * the deal.II library will do so by calling compress(). However, it will
- * only do so for all processes that actually do an addition -- if the
- * condition is never true for one of the processes, then this one will
- * not get to the actual compress() call, whereas all the other ones do.
- * This gets us into trouble, since all the other processes hang in the
- * call to flush the write buffers, while the one other process advances
- * to the call to compute the l2 norm. At this time, you will get an error
- * that some operation was attempted by only a subset of processes. This
- * behavior may seem surprising, unless you know that write/addition
- * operations on single elements may trigger this behavior.
- *
- * The problem described here may be avoided by placing additional calls
- * to compress(), or making sure that all processes do the same type of
- * operations at the same time, for example by placing zero additions if
- * necessary.
- *
- * @see
- * @ref GlossGhostedVector "vectors with ghost elements"
- *
- * @ingroup PETScWrappers
- * @ingroup Vectors
- * @author Wolfgang Bangerth, 2004
- */
- class Vector : public VectorBase
- {
- public:
- /**
- * Declare type for container size.
- */
- using size_type = types::global_dof_index;
-
- /**
- * Default constructor. Initialize the vector as empty.
- */
- Vector();
-
- /**
- * Constructor. Set dimension to @p n and initialize all elements with
- * zero.
- *
- * @arg local_size denotes the size of the chunk that shall be stored on
- * the present process.
- *
- * @arg communicator denotes the MPI communicator over which the
- * different parts of the vector shall communicate
- *
- * The constructor is made explicit to avoid accidents like this:
- * <tt>v=0;</tt>. Presumably, the user wants to set every element of the
- * vector to zero, but instead, what happens is this call:
- * <tt>v=Vector@<number@>(0);</tt>, i.e. the vector is replaced by one
- * of length zero.
- */
- explicit Vector(const MPI_Comm &communicator,
- const size_type n,
- const size_type local_size);
-
-
- /**
- * Copy-constructor from deal.II vectors. Sets the dimension to that of
- * the given vector, and copies all elements.
- *
- * @arg local_size denotes the size of the chunk that shall be stored on
- * the present process.
- *
- * @arg communicator denotes the MPI communicator over which the
- * different parts of the vector shall communicate
- */
- template <typename Number>
- explicit Vector(const MPI_Comm & communicator,
- const dealii::Vector<Number> &v,
- const size_type local_size);
-
-
- /**
- * Copy-constructor the values from a PETSc wrapper vector class.
- *
- * @arg local_size denotes the size of the chunk that shall be stored on
- * the present process.
- *
- * @arg communicator denotes the MPI communicator over which the
- * different parts of the vector shall communicate
- *
- * @deprecated The use of objects that are explicitly of type VectorBase
- * is deprecated: use PETScWrappers::MPI::Vector instead.
- */
- DEAL_II_DEPRECATED
- explicit Vector(const MPI_Comm & communicator,
- const VectorBase &v,
- const size_type local_size);
-
- /**
- * Construct a new parallel ghosted PETSc vector from IndexSets.
- *
- * Note that @p local must be ascending and 1:1, see
- * IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in
- * @p local need to be contiguous, meaning you can only create vectors
- * from a DoFHandler with several finite element components if they are
- * not reordered by component (use a PETScWrappers::BlockVector
- * otherwise). The global size of the vector is determined by
- * local.size(). The global indices in @p ghost are supplied as ghost
- * indices so that they can be read locally.
- *
- * Note that the @p ghost IndexSet may be empty and that any indices
- * already contained in @p local are ignored during construction. That
- * way, the ghost parameter can equal the set of locally relevant
- * degrees of freedom, see step-32.
- *
- * @note This operation always creates a ghosted vector, which is considered
- * read-only.
- *
- * @see
- * @ref GlossGhostedVector "vectors with ghost elements"
- */
- Vector(const IndexSet &local,
- const IndexSet &ghost,
- const MPI_Comm &communicator);
-
- /**
- * Construct a new parallel PETSc vector without ghost elements from an
- * IndexSet.
- *
- * Note that @p local must be ascending and 1:1, see
- * IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in
- * @p local need to be contiguous, meaning you can only create vectors
- * from a DoFHandler with several finite element components if they are
- * not reordered by component (use a PETScWrappers::BlockVector
- * otherwise).
- */
- explicit Vector(const IndexSet &local, const MPI_Comm &communicator);
-
- /**
- * Release all memory and return to a state just like after having
- * called the default constructor.
- */
- virtual void
- clear() override;
-
- /**
- * Copy the given vector. Resize the present vector if necessary. Also
- * take over the MPI communicator of @p v.
- */
- Vector &
- operator=(const Vector &v);
-
- /**
- * Set all components of the vector to the given number @p s. Simply
- * pass this down to the base class, but we still need to declare this
- * function to make the example given in the discussion about making the
- * constructor explicit work.
- */
- Vector &
- operator=(const PetscScalar s);
-
- /**
- * Copy the values of a deal.II vector (as opposed to those of the PETSc
- * vector wrapper class) into this object.
- *
- * Contrary to the case of sequential vectors, this operators requires
- * that the present vector already has the correct size, since we need
- * to have a partition and a communicator present which we otherwise
- * can't get from the source vector.
- */
- template <typename number>
- Vector &
- operator=(const dealii::Vector<number> &v);
-
- /**
- * Change the dimension of the vector to @p N. It is unspecified how
- * resizing the vector affects the memory allocation of this object;
- * i.e., it is not guaranteed that resizing it to a smaller size
- * actually also reduces memory consumption, or if for efficiency the
- * same amount of memory is used
- *
- * @p local_size denotes how many of the @p N values shall be stored
- * locally on the present process. for less data.
- *
- * @p communicator denotes the MPI communicator henceforth to be used
- * for this vector.
- *
- * If @p omit_zeroing_entries is false, the vector is filled by zeros.
- * Otherwise, the elements are left an unspecified state.
- */
- void
- reinit(const MPI_Comm &communicator,
- const size_type N,
- const size_type local_size,
- const bool omit_zeroing_entries = false);
-
- /**
- * Change the dimension to that of the vector @p v, and also take over
- * the partitioning into local sizes as well as the MPI communicator.
- * The same applies as for the other @p reinit function.
- *
- * The elements of @p v are not copied, i.e. this function is the same
- * as calling <tt>reinit(v.size(), v.local_size(),
- * omit_zeroing_entries)</tt>.
- */
- void
- reinit(const Vector &v, const bool omit_zeroing_entries = false);
-
- /**
- * Reinit as a vector with ghost elements. See the constructor with
- * same signature for more details.
- *
- * @see
- * @ref GlossGhostedVector "vectors with ghost elements"
- */
- void
- reinit(const IndexSet &local,
- const IndexSet &ghost,
- const MPI_Comm &communicator);
-
- /**
- * Reinit as a vector without ghost elements. See constructor with same
- * signature for more details.
- *
- * @see
- * @ref GlossGhostedVector "vectors with ghost elements"
- */
- void
- reinit(const IndexSet &local, const MPI_Comm &communicator);
-
- /**
- * Return a reference to the MPI communicator object in use with this
- * vector.
- */
- const MPI_Comm &
- get_mpi_communicator() const override;
-
- /**
- * Print to a stream. @p precision denotes the desired precision with
- * which values shall be printed, @p scientific whether scientific
- * notation shall be used. If @p across is @p true then the vector is
- * printed in a line, while if @p false then the elements are printed on
- * a separate line each.
- *
- * @note This function overloads the one in the base class to ensure
- * that the right thing happens for parallel vectors that are
- * distributed across processors.
- */
- void
- print(std::ostream & out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * @copydoc PETScWrappers::VectorBase::all_zero()
- *
- * @note This function overloads the one in the base class to make this
- * a collective operation.
- */
- bool
- all_zero() const;
-
- protected:
- /**
- * Create a vector of length @p n. For this class, we create a parallel
- * vector. @p n denotes the total size of the vector to be created. @p
- * local_size denotes how many of these elements shall be stored
- * locally.
- */
- virtual void
- create_vector(const size_type n, const size_type local_size);
-
-
-
- /**
- * Create a vector of global length @p n, local size @p local_size and
- * with the specified ghost indices. Note that you need to call
- * update_ghost_values() before accessing those.
- */
- virtual void
- create_vector(const size_type n,
- const size_type local_size,
- const IndexSet &ghostnodes);
-
-
- private:
- /**
- * Copy of the communicator object to be used for this parallel vector.
- */
- MPI_Comm communicator;
- };
-
-
- // ------------------ template and inline functions -------------
-
-
- /**
- * Global function @p swap which overloads the default implementation of
- * the C++ standard library which uses a temporary object. The function
- * simply exchanges the data of the two vectors.
- *
- * @relatesalso PETScWrappers::MPI::Vector
- * @author Wolfgang Bangerth, 2004
- */
- inline void
- swap(Vector &u, Vector &v)
- {
- u.swap(v);
- }
-
-
-# ifndef DOXYGEN
-
- template <typename number>
- Vector::Vector(const MPI_Comm & communicator,
- const dealii::Vector<number> &v,
- const size_type local_size)
- : communicator(communicator)
- {
- Vector::create_vector(v.size(), local_size);
-
- *this = v;
- }
-
-
-
- inline Vector &
- Vector::operator=(const PetscScalar s)
- {
- VectorBase::operator=(s);
-
- return *this;
- }
-
-
-
- template <typename number>
- inline Vector &
- Vector::operator=(const dealii::Vector<number> &v)
- {
- Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
-
- // FIXME: the following isn't necessarily fast, but this is due to
- // the fact that PETSc doesn't offer an inlined access operator.
- //
- // if someone wants to contribute some code: to make this code
- // faster, one could either first convert all values to PetscScalar,
- // and then set them all at once using VecSetValues. This has the
- // drawback that it could take quite some memory, if the vector is
- // large, and it would in addition allocate memory on the heap, which
- // is expensive. an alternative would be to split the vector into
- // chunks of, say, 128 elements, convert a chunk at a time and set it
- // in the output vector using VecSetValues. since 128 elements is
- // small enough, this could easily be allocated on the stack (as a
- // local variable) which would make the whole thing much more
- // efficient.
- //
- // a second way to make things faster is for the special case that
- // number==PetscScalar. we could then declare a specialization of
- // this template, and omit the conversion. the problem with this is
- // that the best we can do is to use VecSetValues, but this isn't
- // very efficient either: it wants to see an array of indices, which
- // in this case a) again takes up a whole lot of memory on the heap,
- // and b) is totally dumb since its content would simply be the
- // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
- // function in Petsc that would take a pointer to an array of
- // PetscScalar values and simply copy n elements verbatim into the
- // vector...
- for (size_type i = 0; i < v.size(); ++i)
- (*this)(i) = v(i);
-
- compress(::dealii::VectorOperation::insert);
-
- return *this;
- }
-
-
-
- inline const MPI_Comm &
- Vector::get_mpi_communicator() const
- {
- return communicator;
- }
-
-# endif // DOXYGEN
- } // namespace MPI
-} // namespace PETScWrappers
-
-namespace internal
-{
- namespace LinearOperatorImplementation
- {
- template <typename>
- class ReinitHelper;
-
- /**
- * A helper class used internally in linear_operator.h. Specialization for
- * PETScWrappers::MPI::Vector.
- */
- template <>
- class ReinitHelper<PETScWrappers::MPI::Vector>
- {
- public:
- template <typename Matrix>
- static void
- reinit_range_vector(const Matrix & matrix,
- PETScWrappers::MPI::Vector &v,
- bool /*omit_zeroing_entries*/)
- {
- v.reinit(matrix.locally_owned_range_indices(),
- matrix.get_mpi_communicator());
- }
-
- template <typename Matrix>
- static void
- reinit_domain_vector(const Matrix & matrix,
- PETScWrappers::MPI::Vector &v,
- bool /*omit_zeroing_entries*/)
- {
- v.reinit(matrix.locally_owned_domain_indices(),
- matrix.get_mpi_communicator());
- }
- };
-
- } // namespace LinearOperatorImplementation
-} /* namespace internal */
-
-/**@}*/
-
-
-/**
- * Declare dealii::PETScWrappers::MPI::Vector as distributed vector.
- *
- * @author Uwe Koecher, 2017
- */
-template <>
-struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
-{};
-
-
-DEAL_II_NAMESPACE_CLOSE
+# include <deal.II/lac/petsc_vector.h>
-# endif // DEAL_II_WITH_PETSC
+# pragma DEAL_II_WARNING( \
+ "This file is deprecated. Use deal.II/lac/petsc_vector.h instead!")
#endif
/*------------------------- petsc_parallel_vector.h -------------------------*/
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_matrix_base.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# include <vector>
*/
friend class BlockMatrixBase<SparseMatrix>;
};
+
+ namespace MPI
+ {
+ /**
+ * Implementation of a parallel sparse matrix class based on PETSc, with
+ * rows of the matrix distributed across an MPI network. All the
+ * functionality is actually in the base class, except for the calls to
+ * generate a parallel sparse matrix. This is possible since PETSc only
+ * works on an abstract matrix type and internally distributes to
+ * functions that do the actual work depending on the actual matrix type
+ * (much like using virtual functions). Only the functions creating a
+ * matrix of specific type differ, and are implemented in this particular
+ * class.
+ *
+ * There are a number of comments on the communication model as well as
+ * access to individual elements in the documentation to the parallel
+ * vector class. These comments apply here as well.
+ *
+ *
+ * <h3>Partitioning of matrices</h3>
+ *
+ * PETSc partitions parallel matrices so that each MPI process "owns" a
+ * certain number of rows (i.e. only this process stores the respective
+ * entries in these rows). The number of rows each process owns has to be
+ * passed to the constructors and reinit() functions via the argument @p
+ * local_rows. The individual values passed as @p local_rows on all the
+ * MPI processes of course have to add up to the global number of rows of
+ * the matrix.
+ *
+ * In addition to this, PETSc also partitions the rectangular chunk of the
+ * matrix it owns (i.e. the @p local_rows times n() elements in the
+ * matrix), so that matrix vector multiplications can be performed
+ * efficiently. This column-partitioning therefore has to match the
+ * partitioning of the vectors with which the matrix is multiplied, just
+ * as the row-partitioning has to match the partitioning of destination
+ * vectors. This partitioning is passed to the constructors and reinit()
+ * functions through the @p local_columns variable, which again has to add
+ * up to the global number of columns in the matrix. The name @p
+     * local_columns may be somewhat misleading, since it does not reflect
+ * that only these columns are stored locally, but it reflects the fact
+ * that these are the columns for which the elements of incoming vectors
+ * are stored locally.
+ *
+ * To make things even more complicated, PETSc needs a very good estimate
+ * of the number of elements to be stored in each row to be efficient.
+     * Otherwise it spends most of the time allocating small chunks of
+     * memory, a process that can slow down programs to a crawl if it
+     * happens too often. As if a good estimate of the number of entries
+     * per row were not enough, it even needs to split this estimate as
+     * follows: for each row it owns, it needs an estimate for the number
+     * of elements in this row that fall into the columns that are set
+     * apart for this process (see above), and the number of elements
+     * that are in the rest of the columns.
+ *
+ * Since in general this information is not readily available, most of the
+     * initializing functions of this class assume that all of the entries
+     * whose number you give as an argument to @p n_nonzero_per_row or @p
+ * row_lengths fall into the columns "owned" by this process, and none
+ * into the other ones. This is a fair guess for most of the rows, since
+ * in a good domain partitioning, nodes only interact with nodes that are
+ * within the same subdomain. It does not hold for nodes on the interfaces
+     * of subdomains, however, and for the rows corresponding to these nodes,
+ * PETSc will have to allocate additional memory, a costly process.
+ *
+ * The only way to avoid this is to tell PETSc where the actual entries of
+ * the matrix will be. For this, there are constructors and reinit()
+ * functions of this class that take a DynamicSparsityPattern object
+ * containing all this information. While in the general case it is
+ * sufficient if the constructors and reinit() functions know the number
+ * of local rows and columns, the functions getting a sparsity pattern
+ * also need to know the number of local rows (@p local_rows_per_process)
+ * and columns (@p local_columns_per_process) for all other processes, in
+ * order to compute which parts of the matrix are which. Thus, it is not
+ * sufficient to just count the number of degrees of freedom that belong
+ * to a particular process, but you have to have the numbers for all
+ * processes available at all processes.
+ *
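+     * As a minimal usage sketch (assuming a DoFHandler @p dof_handler and an
+     * MPI communicator @p mpi_communicator are available; these names are
+     * purely illustrative), a matrix of this type can be set up from an
+     * IndexSet and a sparsity pattern as follows:
+     * @code
+     * const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+     *
+     * DynamicSparsityPattern dsp(locally_owned_dofs);
+     * DoFTools::make_sparsity_pattern(dof_handler, dsp);
+     *
+     * PETScWrappers::MPI::SparseMatrix system_matrix;
+     * system_matrix.reinit(locally_owned_dofs,
+     *                      locally_owned_dofs,
+     *                      dsp,
+     *                      mpi_communicator);
+     * @endcode
+     * In a fully distributed program the locally built sparsity pattern
+     * usually also needs to be communicated to the owning processes before
+     * this call; the sketch only illustrates the reinit() overload that
+     * takes IndexSets.
+     *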
+ * @ingroup PETScWrappers
+ * @ingroup Matrix1
+ * @author Wolfgang Bangerth, 2004
+ */
+ class SparseMatrix : public MatrixBase
+ {
+ public:
+ /**
+ * Declare type for container size.
+ */
+ using size_type = types::global_dof_index;
+
+ /**
+ * A structure that describes some of the traits of this class in terms
+ * of its run-time behavior. Some other classes (such as the block
+       * matrix classes) that take one or the other of the matrix classes as their
+ * template parameters can tune their behavior based on the variables in
+ * this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is not safe to elide additions of zeros to individual elements
+ * of this matrix. The reason is that additions to the matrix may
+ * trigger collective operations synchronizing buffers on multiple
+ * processes. If an addition is elided on one process, this may lead
+ * to other processes hanging in an infinite waiting loop.
+ */
+ static const bool zero_addition_can_be_elided = false;
+ };
+
+ /**
+ * Default constructor. Create an empty matrix.
+ */
+ SparseMatrix();
+
+ /**
+ * Destructor to free the PETSc object.
+ */
+ ~SparseMatrix() override;
+
+ /**
+ * Create a sparse matrix of dimensions @p m times @p n, with an initial
+ * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
+ * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
+ * function for more information about these parameters). PETSc is able
+ * to cope with the situation that more than this number of elements are
+ * later allocated for a row, but this involves copying data, and is
+ * thus expensive.
+ *
+       * For the meaning of the @p local_rows and @p local_columns parameters,
+ * see the class documentation.
+ *
+ * The @p is_symmetric flag determines whether we should tell PETSc that
+ * the matrix is going to be symmetric (as indicated by the call
+       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
+ * documentation states that one cannot form an ILU decomposition of a
+ * matrix for which this flag has been set to @p true, only an ICC. The
+ * default value of this flag is @p false.
+ *
+ * @deprecated This constructor is deprecated: please use the
+ * constructor with a sparsity pattern argument instead.
+ */
+ DEAL_II_DEPRECATED
+ SparseMatrix(const MPI_Comm &communicator,
+ const size_type m,
+ const size_type n,
+ const size_type local_rows,
+ const size_type local_columns,
+ const size_type n_nonzero_per_row,
+ const bool is_symmetric = false,
+ const size_type n_offdiag_nonzero_per_row = 0);
+
+ /**
+ * Initialize a rectangular matrix with @p m rows and @p n columns. The
+       * maximal number of nonzero entries for diagonal and off-diagonal
+ * blocks of each row is given by the @p row_lengths and @p
+ * offdiag_row_lengths arrays.
+ *
+       * For the meaning of the @p local_rows and @p local_columns parameters,
+ * see the class documentation.
+ *
+ * Just as for the other constructors: PETSc is able to cope with the
+ * situation that more than this number of elements are later allocated
+ * for a row, but this involves copying data, and is thus expensive.
+ *
+ * The @p is_symmetric flag determines whether we should tell PETSc that
+ * the matrix is going to be symmetric (as indicated by the call
+       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
+ * documentation states that one cannot form an ILU decomposition of a
+ * matrix for which this flag has been set to @p true, only an ICC. The
+ * default value of this flag is @p false.
+ *
+ * @deprecated This constructor is deprecated: please use the
+ * constructor with a sparsity pattern argument instead.
+ */
+ DEAL_II_DEPRECATED
+ SparseMatrix(const MPI_Comm & communicator,
+ const size_type m,
+ const size_type n,
+ const size_type local_rows,
+ const size_type local_columns,
+ const std::vector<size_type> &row_lengths,
+ const bool is_symmetric = false,
+ const std::vector<size_type> &offdiag_row_lengths =
+ std::vector<size_type>());
+
+ /**
+ * Initialize using the given sparsity pattern with communication
+ * happening over the provided @p communicator.
+ *
+ * For the meaning of the @p local_rows_per_process and @p
+ * local_columns_per_process parameters, see the class documentation.
+ *
+ * Note that PETSc can be very slow if you do not provide it with a good
+ * estimate of the lengths of rows. Using the present function is a very
+ * efficient way to do this, as it uses the exact number of nonzero
+ * entries for each row of the matrix by using the given sparsity
+ * pattern argument. If the @p preset_nonzero_locations flag is @p true,
+       * this function not only sets the correct row sizes up
+       * front, but also pre-allocates the correct nonzero entries in the
+ * matrix.
+ *
+       * PETSc allows one to later add additional nonzero entries to a matrix by
+ * simply writing to these elements. However, this will then lead to
+ * additional memory allocations which are very inefficient and will
+ * greatly slow down your program. It is therefore significantly more
+ * efficient to get memory allocation right from the start.
+ */
+ template <typename SparsityPatternType>
+ SparseMatrix(const MPI_Comm & communicator,
+ const SparsityPatternType & sparsity_pattern,
+ const std::vector<size_type> &local_rows_per_process,
+ const std::vector<size_type> &local_columns_per_process,
+ const unsigned int this_process,
+ const bool preset_nonzero_locations = true);
+
+ /**
+       * This operator assigns a scalar to a matrix. Since this usually does
+ * not make much sense (should we set all matrix entries to this value?
+ * Only the nonzero entries of the sparsity pattern?), this operation is
+ * only allowed if the actual value to be assigned is zero. This
+ * operator only exists to allow for the obvious notation
+ * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
+       * keeps the sparsity pattern previously used.
+ */
+ SparseMatrix &
+ operator=(const value_type d);
+
+
+ /**
+ * Make a copy of the PETSc matrix @p other. It is assumed that both
+ * matrices have the same SparsityPattern.
+ */
+ void
+ copy_from(const SparseMatrix &other);
+
+ /**
+ * Throw away the present matrix and generate one that has the same
+ * properties as if it were created by the constructor of this class
+ * with the same argument list as the present function.
+ *
+ * @deprecated This overload of <code>reinit</code> is deprecated:
+ * please use the overload with a sparsity pattern argument instead.
+ */
+ DEAL_II_DEPRECATED
+ void
+ reinit(const MPI_Comm &communicator,
+ const size_type m,
+ const size_type n,
+ const size_type local_rows,
+ const size_type local_columns,
+ const size_type n_nonzero_per_row,
+ const bool is_symmetric = false,
+ const size_type n_offdiag_nonzero_per_row = 0);
+
+ /**
+ * Throw away the present matrix and generate one that has the same
+ * properties as if it were created by the constructor of this class
+ * with the same argument list as the present function.
+ *
+ * @deprecated This overload of <code>reinit</code> is deprecated:
+ * please use the overload with a sparsity pattern argument instead.
+ */
+ DEAL_II_DEPRECATED
+ void
+ reinit(const MPI_Comm & communicator,
+ const size_type m,
+ const size_type n,
+ const size_type local_rows,
+ const size_type local_columns,
+ const std::vector<size_type> &row_lengths,
+ const bool is_symmetric = false,
+ const std::vector<size_type> &offdiag_row_lengths =
+ std::vector<size_type>());
+
+ /**
+ * Initialize using the given sparsity pattern with communication
+ * happening over the provided @p communicator.
+ *
+ * Note that PETSc can be very slow if you do not provide it with a good
+ * estimate of the lengths of rows. Using the present function is a very
+ * efficient way to do this, as it uses the exact number of nonzero
+ * entries for each row of the matrix by using the given sparsity
+ * pattern argument. If the @p preset_nonzero_locations flag is @p true,
+       * this function not only sets the correct row sizes up
+       * front, but also pre-allocates the correct nonzero entries in the
+ * matrix.
+ *
+       * PETSc allows one to later add additional nonzero entries to a matrix by
+ * simply writing to these elements. However, this will then lead to
+ * additional memory allocations which are very inefficient and will
+ * greatly slow down your program. It is therefore significantly more
+ * efficient to get memory allocation right from the start.
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const MPI_Comm & communicator,
+ const SparsityPatternType & sparsity_pattern,
+ const std::vector<size_type> &local_rows_per_process,
+ const std::vector<size_type> &local_columns_per_process,
+ const unsigned int this_process,
+ const bool preset_nonzero_locations = true);
+
+ /**
+       * Create a matrix where the size() of the IndexSets determines the
+       * global number of rows and columns and the entries of the IndexSets
+ * give the rows and columns for the calling processor. Note that only
+ * ascending, 1:1 IndexSets are supported.
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const IndexSet & local_rows,
+ const IndexSet & local_columns,
+ const SparsityPatternType &sparsity_pattern,
+ const MPI_Comm & communicator);
+
+ /**
+ * Initialize this matrix to have the same structure as @p other. This
+ * will not copy the values of the other matrix, but you can use
+ * copy_from() for this.
+ */
+ void
+ reinit(const SparseMatrix &other);
+
+ /**
+ * Return a reference to the MPI communicator object in use with this
+ * matrix.
+ */
+ virtual const MPI_Comm &
+ get_mpi_communicator() const override;
+
+ /**
+ * @addtogroup Exceptions
+ * @{
+ */
+ /**
+ * Exception
+ */
+ DeclException2(ExcLocalRowsTooLarge,
+ int,
+ int,
+ << "The number of local rows " << arg1
+ << " must be larger than the total number of rows "
+ << arg2);
+ //@}
+
+ /**
+ * Return the square of the norm of the vector $v$ with respect to the
+ * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is
+ * useful, e.g. in the finite element context, where the $L_2$ norm of a
+ * function equals the matrix norm with respect to the mass matrix of
+ * the vector representing the nodal values of the finite element
+ * function.
+ *
+       * Obviously, the matrix needs to be square for this operation.
+ *
+ * The implementation of this function is not as efficient as the one in
+ * the @p MatrixBase class used in deal.II (i.e. the original one, not
+ * the PETSc wrapper class) since PETSc doesn't support this operation
+ * and needs a temporary vector.
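+       *
+       * A call sketch (assuming a mass matrix @p M and a compatible parallel
+       * vector @p v, both already initialized; the names are illustrative
+       * only):
+       * @code
+       * const PetscScalar vMv = M.matrix_norm_square(v);
+       * @endcode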
+ */
+ PetscScalar
+ matrix_norm_square(const Vector &v) const;
+
+ /**
+ * Compute the matrix scalar product $\left(u^\ast,Mv\right)$.
+ *
+ * The implementation of this function is not as efficient as the one in
+ * the @p MatrixBase class used in deal.II (i.e. the original one, not
+ * the PETSc wrapper class) since PETSc doesn't support this operation
+ * and needs a temporary vector.
+ */
+ PetscScalar
+ matrix_scalar_product(const Vector &u, const Vector &v) const;
+
+ /**
+ * Return the partitioning of the domain space of this matrix, i.e., the
+ * partitioning of the vectors this matrix has to be multiplied with.
+ */
+ IndexSet
+ locally_owned_domain_indices() const;
+
+ /**
+ * Return the partitioning of the range space of this matrix, i.e., the
+ * partitioning of the vectors that result from matrix-vector
+ * products.
+ */
+ IndexSet
+ locally_owned_range_indices() const;
+
+ /**
+ * Perform the matrix-matrix multiplication $C = AB$, or,
+ * $C = A \text{diag}(V) B$ given a compatible vector $V$.
+ *
+ * This function calls MatrixBase::mmult() to do the actual work.
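+       *
+       * A call sketch (all three matrices and the optional vector are assumed
+       * to be initialized and compatible; the names are illustrative only):
+       * @code
+       * A.mmult(C, B);     // C = A * B
+       * A.mmult(C, B, V);  // C = A * diag(V) * B
+       * @endcode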
+ */
+ void
+ mmult(SparseMatrix & C,
+ const SparseMatrix &B,
+ const MPI::Vector & V = MPI::Vector()) const;
+
+ /**
+ * Perform the matrix-matrix multiplication with the transpose of
+ * <tt>this</tt>, i.e., $C = A^T B$, or,
+ * $C = A^T \text{diag}(V) B$ given a compatible vector $V$.
+ *
+ * This function calls MatrixBase::Tmmult() to do the actual work.
+ */
+ void
+ Tmmult(SparseMatrix & C,
+ const SparseMatrix &B,
+ const MPI::Vector & V = MPI::Vector()) const;
+
+ private:
+ /**
+       * Copy of the communicator object to be used for this parallel matrix.
+ */
+ MPI_Comm communicator;
+
+ /**
+ * Do the actual work for the respective reinit() function and the
+ * matching constructor, i.e. create a matrix. Getting rid of the
+ * previous matrix is left to the caller.
+ *
+ * @deprecated This overload of <code>do_reinit</code> is deprecated:
+ * please use the overload with a sparsity pattern argument instead.
+ */
+ DEAL_II_DEPRECATED
+ void
+ do_reinit(const size_type m,
+ const size_type n,
+ const size_type local_rows,
+ const size_type local_columns,
+ const size_type n_nonzero_per_row,
+ const bool is_symmetric = false,
+ const size_type n_offdiag_nonzero_per_row = 0);
+
+ /**
+ * Same as previous function.
+ *
+ * @deprecated This overload of <code>do_reinit</code> is deprecated:
+ * please use the overload with a sparsity pattern argument instead.
+ */
+ DEAL_II_DEPRECATED
+ void
+ do_reinit(const size_type m,
+ const size_type n,
+ const size_type local_rows,
+ const size_type local_columns,
+ const std::vector<size_type> &row_lengths,
+ const bool is_symmetric = false,
+ const std::vector<size_type> &offdiag_row_lengths =
+ std::vector<size_type>());
+
+ /**
+ * Same as previous functions.
+ */
+ template <typename SparsityPatternType>
+ void
+ do_reinit(const SparsityPatternType & sparsity_pattern,
+ const std::vector<size_type> &local_rows_per_process,
+ const std::vector<size_type> &local_columns_per_process,
+ const unsigned int this_process,
+ const bool preset_nonzero_locations);
+
+ /**
+ * Same as previous functions.
+ */
+ template <typename SparsityPatternType>
+ void
+ do_reinit(const IndexSet & local_rows,
+ const IndexSet & local_columns,
+ const SparsityPatternType &sparsity_pattern);
+
+ /**
+ * To allow calling protected prepare_add() and prepare_set().
+ */
+ friend class BlockMatrixBase<SparseMatrix>;
+ };
+
+
+
+ // -------- template and inline functions ----------
+
+ inline const MPI_Comm &
+ SparseMatrix::get_mpi_communicator() const
+ {
+ return communicator;
+ }
+ } // namespace MPI
} // namespace PETScWrappers
DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_vector_h
+# define dealii_petsc_vector_h
+
+
+# include <deal.II/base/config.h>
+
+# ifdef DEAL_II_WITH_PETSC
+
+# include <deal.II/base/index_set.h>
+# include <deal.II/base/subscriptor.h>
+
+# include <deal.II/lac/exceptions.h>
+# include <deal.II/lac/petsc_vector_base.h>
+# include <deal.II/lac/vector.h>
+# include <deal.II/lac/vector_operation.h>
+# include <deal.II/lac/vector_type_traits.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+/*! @addtogroup PETScWrappers
+ *@{
+ */
+namespace PETScWrappers
+{
+ /**
+ * Namespace for PETSc classes that work in parallel over MPI, such as
+ * distributed vectors and matrices.
+ *
+ * @ingroup PETScWrappers
+ * @author Wolfgang Bangerth, 2004
+ */
+ namespace MPI
+ {
+ /**
+     * Implementation of a parallel vector class based on PETSc and using MPI
+ * communication to synchronize distributed operations. All the
+ * functionality is actually in the base class, except for the calls to
+ * generate a parallel vector. This is possible since PETSc only works on
+ * an abstract vector type and internally distributes to functions that do
+ * the actual work depending on the actual vector type (much like using
+ * virtual functions). Only the functions creating a vector of specific
+ * type differ, and are implemented in this particular class.
+ *
+ *
+ * <h3>Parallel communication model</h3>
+ *
+ * The parallel functionality of PETSc is built on top of the Message
+ * Passing Interface (MPI). MPI's communication model is built on
+ * collective communications: if one process wants something from another,
+ * that other process has to be willing to accept this communication. A
+ * process cannot query data from another process by calling a remote
+ * function, without that other process expecting such a transaction. The
+ * consequence is that most of the operations in the base class of this
+ * class have to be called collectively. For example, if you want to
+ * compute the l2 norm of a parallel vector, @em all processes across
+ * which this vector is shared have to call the @p l2_norm function. If
+ * you don't do this, but instead only call the @p l2_norm function on one
+ * process, then the following happens: This one process will call one of
+ * the collective MPI functions and wait for all the other processes to
+ * join in on this. Since the other processes don't call this function,
+ * you will either get a time-out on the first process, or, worse, by the
+ * time the next call to a PETSc function generates an MPI message on
+ * the other processes, you will get a cryptic message that only a subset
+ * of processes attempted a communication. These bugs can be very hard to
+ * figure out, unless you are well-acquainted with the communication model
+ * of MPI, and know which functions may generate MPI messages.
+ *
+ * One particular case, where an MPI message may be generated unexpectedly
+ * is discussed below.
+ *
+ *
+ * <h3>Accessing individual elements of a vector</h3>
+ *
+ * PETSc does allow read access to individual elements of a vector, but in
+ * the distributed case only to elements that are stored locally. We
+ * implement this through calls like <tt>d=vec(i)</tt>. However, if you
+ * access an element outside the locally stored range, an exception is
+ * generated.
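+ *
+ * As a minimal sketch (assuming a vector @p vec and a global index @p i
+ * that may or may not be stored locally), a read can be guarded with
+ * in_local_range():
+ * @code
+ * PetscScalar d = 0;
+ * // reading is only allowed for locally stored elements
+ * if (vec.in_local_range(i))
+ *   d = vec(i);
+ * @endcode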
+ *
+ * In contrast to read access, PETSc (and the respective deal.II wrapper
+ * classes) allow writing (or adding) to individual elements of vectors,
+ * even if they are stored on a different process. You can do this by
+ * writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or
+ * similar operations. There is one catch, however, that may lead to very
+ * confusing error messages: PETSc requires application programs to call
+ * the compress() function when they switch from adding to elements to
+ * writing to elements. The reasoning is that all processes might
+ * accumulate addition operations to elements, even if multiple processes
+ * write to the same elements. By the time we call compress() the next
+ * time, all these additions are executed. However, if one process adds to
+ * an element, and another overwrites it, the order of execution would
+ * yield non-deterministic behavior if we don't make sure that a
+ * synchronization with compress() happens in between.
+ *
+ * In order to make sure these calls to compress() happen at the
+ * appropriate time, the deal.II wrappers keep a state variable that stores
+ * the presently allowed operation: additions or writes. If it
+ * encounters an operation of the opposite kind, it calls compress() and
+ * flips the state. This can sometimes lead to very confusing behavior, in
+ * code that may for example look like this:
+ * @code
+ * PETScWrappers::MPI::Vector vector;
+ * ...
+ * // do some write operations on the vector
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ * vector(i) = i;
+ *
+ * // do some additions to vector elements, but only for some elements
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ * if (some_condition(i) == true)
+ * vector(i) += 1;
+ *
+ * // do another collective operation
+ * const double norm = vector.l2_norm();
+ * @endcode
+ *
+ * This code can run into trouble: by the time we see the first addition
+ * operation, we need to flush the overwrite buffers for the vector, and
+ * the deal.II library will do so by calling compress(). However, it will
+ * only do so for all processes that actually do an addition -- if the
+ * condition is never true for one of the processes, then this one will
+ * not get to the actual compress() call, whereas all the other ones do.
+ * This gets us into trouble, since all the other processes hang in the
+ * call to flush the write buffers, while the one remaining process advances
+ * to the call to compute the l2 norm. At this time, you will get an error
+ * that some operation was attempted by only a subset of processes. This
+ * behavior may seem surprising, unless you know that write/addition
+ * operations on single elements may trigger this behavior.
+ *
+ * The problem described here may be avoided by placing additional calls
+ * to compress(), or making sure that all processes do the same type of
+ * operations at the same time, for example by placing zero additions if
+ * necessary.
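+ *
+ * As a sketch of the first remedy (reusing the names from the example
+ * above), every process can call compress() with the matching
+ * VectorOperation before the access mode changes, whether or not it
+ * actually touched any entries:
+ * @code
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ *   vector(i) = i;
+ *
+ * // collective: every process flushes the insertions
+ * vector.compress(VectorOperation::insert);
+ *
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ *   if (some_condition(i) == true)
+ *     vector(i) += 1;
+ *
+ * // collective: every process flushes the additions, even those that
+ * // never executed the body of the loop above
+ * vector.compress(VectorOperation::add);
+ *
+ * const double norm = vector.l2_norm();
+ * @endcode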
+ *
+ * @see
+ * @ref GlossGhostedVector "vectors with ghost elements"
+ *
+ * @ingroup PETScWrappers
+ * @ingroup Vectors
+ * @author Wolfgang Bangerth, 2004
+ */
+ class Vector : public VectorBase
+ {
+ public:
+ /**
+ * Declare type for container size.
+ */
+ using size_type = types::global_dof_index;
+
+ /**
+ * Default constructor. Initialize the vector as empty.
+ */
+ Vector();
+
+ /**
+ * Constructor. Set dimension to @p n and initialize all elements with
+ * zero.
+ *
+ * @arg local_size denotes the size of the chunk that shall be stored on
+ * the present process.
+ *
+ * @arg communicator denotes the MPI communicator over which the
+ * different parts of the vector shall communicate
+ *
+ * The constructor is made explicit to avoid accidents like this:
+ * <tt>v=0;</tt>. Presumably, the user wants to set every element of the
+ * vector to zero, but instead, what happens is this call:
+ * <tt>v=Vector@<number@>(0);</tt>, i.e. the vector is replaced by one
+ * of length zero.
+ */
+ explicit Vector(const MPI_Comm &communicator,
+ const size_type n,
+ const size_type local_size);
+
+
+ /**
+ * Copy-constructor from deal.II vectors. Sets the dimension to that of
+ * the given vector, and copies all elements.
+ *
+ * @arg local_size denotes the size of the chunk that shall be stored on
+ * the present process.
+ *
+ * @arg communicator denotes the MPI communicator over which the
+ * different parts of the vector shall communicate
+ */
+ template <typename Number>
+ explicit Vector(const MPI_Comm & communicator,
+ const dealii::Vector<Number> &v,
+ const size_type local_size);
+
+
+ /**
+ * Copy constructor. Copies the values from a PETSc wrapper vector class.
+ *
+ * @arg local_size denotes the size of the chunk that shall be stored on
+ * the present process.
+ *
+ * @arg communicator denotes the MPI communicator over which the
+ * different parts of the vector shall communicate
+ *
+ * @deprecated The use of objects that are explicitly of type VectorBase
+ * is deprecated: use PETScWrappers::MPI::Vector instead.
+ */
+ DEAL_II_DEPRECATED
+ explicit Vector(const MPI_Comm & communicator,
+ const VectorBase &v,
+ const size_type local_size);
+
+ /**
+ * Construct a new parallel ghosted PETSc vector from IndexSets.
+ *
+ * Note that @p local must be ascending and 1:1, see
+ * IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in
+ * @p local need to be contiguous, meaning you can only create vectors
+ * from a DoFHandler with several finite element components if they are
+ * not reordered by component (use a PETScWrappers::BlockVector
+ * otherwise). The global size of the vector is determined by
+ * local.size(). The global indices in @p ghost are supplied as ghost
+ * indices so that they can be read locally.
+ *
+ * Note that the @p ghost IndexSet may be empty and that any indices
+ * already contained in @p local are ignored during construction. That
+ * way, the ghost parameter can equal the set of locally relevant
+ * degrees of freedom, see step-32.
+ *
+ * @note This operation always creates a ghosted vector, which is considered
+ * read-only.
+ *
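+ * As a sketch of the intended use (assuming a DoFHandler object
+ * @p dof_handler and an MPI communicator @p mpi_communicator are
+ * available):
+ * @code
+ * const IndexSet locally_owned = dof_handler.locally_owned_dofs();
+ * IndexSet locally_relevant;
+ * DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant);
+ *
+ * // ghosted vector: owned entries plus read-only ghost entries
+ * PETScWrappers::MPI::Vector ghosted(locally_owned,
+ *                                    locally_relevant,
+ *                                    mpi_communicator);
+ * @endcode
+ *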
+ * @see
+ * @ref GlossGhostedVector "vectors with ghost elements"
+ */
+ Vector(const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator);
+
+ /**
+ * Construct a new parallel PETSc vector without ghost elements from an
+ * IndexSet.
+ *
+ * Note that @p local must be ascending and 1:1, see
+ * IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in
+ * @p local need to be contiguous, meaning you can only create vectors
+ * from a DoFHandler with several finite element components if they are
+ * not reordered by component (use a PETScWrappers::BlockVector
+ * otherwise).
+ */
+ explicit Vector(const IndexSet &local, const MPI_Comm &communicator);
+
+ /**
+ * Release all memory and return to a state just like after having
+ * called the default constructor.
+ */
+ virtual void
+ clear() override;
+
+ /**
+ * Copy the given vector. Resize the present vector if necessary. Also
+ * take over the MPI communicator of @p v.
+ */
+ Vector &
+ operator=(const Vector &v);
+
+ /**
+ * Set all components of the vector to the given number @p s. Simply
+ * pass this down to the base class, but we still need to declare this
+ * function to make the example given in the discussion about making the
+ * constructor explicit work.
+ */
+ Vector &
+ operator=(const PetscScalar s);
+
+ /**
+ * Copy the values of a deal.II vector (as opposed to those of the PETSc
+ * vector wrapper class) into this object.
+ *
+ * Contrary to the case of sequential vectors, this operator requires
+ * that the present vector already has the correct size, since we need
+ * to have a partition and a communicator present which we otherwise
+ * can't get from the source vector.
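+ *
+ * A minimal sketch (assuming an IndexSet @p locally_owned and a
+ * communicator @p mpi_communicator):
+ * @code
+ * PETScWrappers::MPI::Vector w(locally_owned, mpi_communicator);
+ * dealii::Vector<double> serial(w.size());
+ * // ... fill serial ...
+ * w = serial;  // w already provides the partition and the communicator
+ * @endcode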
+ */
+ template <typename number>
+ Vector &
+ operator=(const dealii::Vector<number> &v);
+
+ /**
+ * Change the dimension of the vector to @p N. It is unspecified how
+ * resizing the vector affects the memory allocation of this object;
+ * i.e., it is not guaranteed that resizing it to a smaller size
+ * actually also reduces memory consumption, or if for efficiency the
+ * same amount of memory is used for less data.
+ *
+ * @p local_size denotes how many of the @p N values shall be stored
+ * locally on the present process.
+ *
+ * @p communicator denotes the MPI communicator henceforth to be used
+ * for this vector.
+ *
+ * If @p omit_zeroing_entries is false, the vector is filled by zeros.
+ * Otherwise, the elements are left in an unspecified state.
+ */
+ void
+ reinit(const MPI_Comm &communicator,
+ const size_type N,
+ const size_type local_size,
+ const bool omit_zeroing_entries = false);
+
+ /**
+ * Change the dimension to that of the vector @p v, and also take over
+ * the partitioning into local sizes as well as the MPI communicator.
+ * The same applies as for the other @p reinit function.
+ *
+ * The elements of @p v are not copied, i.e. this function is the same
+ * as calling <tt>reinit(v.size(), v.local_size(),
+ * omit_zeroing_entries)</tt>.
+ */
+ void
+ reinit(const Vector &v, const bool omit_zeroing_entries = false);
+
+ /**
+ * Reinit as a vector with ghost elements. See the constructor with
+ * same signature for more details.
+ *
+ * @see
+ * @ref GlossGhostedVector "vectors with ghost elements"
+ */
+ void
+ reinit(const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator);
+
+ /**
+ * Reinit as a vector without ghost elements. See constructor with same
+ * signature for more details.
+ *
+ * @see
+ * @ref GlossGhostedVector "vectors with ghost elements"
+ */
+ void
+ reinit(const IndexSet &local, const MPI_Comm &communicator);
+
+ /**
+ * Return a reference to the MPI communicator object in use with this
+ * vector.
+ */
+ const MPI_Comm &
+ get_mpi_communicator() const override;
+
+ /**
+ * Print to a stream. @p precision denotes the desired precision with
+ * which values shall be printed, @p scientific whether scientific
+ * notation shall be used. If @p across is @p true then the vector is
+ * printed in a line, while if @p false then the elements are printed on
+ * a separate line each.
+ *
+ * @note This function overloads the one in the base class to ensure
+ * that the right thing happens for parallel vectors that are
+ * distributed across processors.
+ */
+ void
+ print(std::ostream & out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * @copydoc PETScWrappers::VectorBase::all_zero()
+ *
+ * @note This function overloads the one in the base class to make this
+ * a collective operation.
+ */
+ bool
+ all_zero() const;
+
+ protected:
+ /**
+ * Create a vector of length @p n. For this class, we create a parallel
+ * vector. @p n denotes the total size of the vector to be created. @p
+ * local_size denotes how many of these elements shall be stored
+ * locally.
+ */
+ virtual void
+ create_vector(const size_type n, const size_type local_size);
+
+
+
+ /**
+ * Create a vector of global length @p n, local size @p local_size and
+ * with the specified ghost indices. Note that you need to call
+ * update_ghost_values() before accessing those.
+ */
+ virtual void
+ create_vector(const size_type n,
+ const size_type local_size,
+ const IndexSet &ghostnodes);
+
+
+ private:
+ /**
+ * Copy of the communicator object to be used for this parallel vector.
+ */
+ MPI_Comm communicator;
+ };
+
+
+ // ------------------ template and inline functions -------------
+
+
+ /**
+ * Global function @p swap which overloads the default implementation of
+ * the C++ standard library which uses a temporary object. The function
+ * simply exchanges the data of the two vectors.
+ *
+ * @relatesalso PETScWrappers::MPI::Vector
+ * @author Wolfgang Bangerth, 2004
+ */
+ inline void
+ swap(Vector &u, Vector &v)
+ {
+ u.swap(v);
+ }
+
+
+# ifndef DOXYGEN
+
+ template <typename number>
+ Vector::Vector(const MPI_Comm & communicator,
+ const dealii::Vector<number> &v,
+ const size_type local_size)
+ : communicator(communicator)
+ {
+ Vector::create_vector(v.size(), local_size);
+
+ *this = v;
+ }
+
+
+
+ inline Vector &
+ Vector::operator=(const PetscScalar s)
+ {
+ VectorBase::operator=(s);
+
+ return *this;
+ }
+
+
+
+ template <typename number>
+ inline Vector &
+ Vector::operator=(const dealii::Vector<number> &v)
+ {
+ Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
+
+ // FIXME: the following isn't necessarily fast, but this is due to
+ // the fact that PETSc doesn't offer an inlined access operator.
+ //
+ // if someone wants to contribute some code: to make this code
+ // faster, one could either first convert all values to PetscScalar,
+ // and then set them all at once using VecSetValues. This has the
+ // drawback that it could take quite some memory, if the vector is
+ // large, and it would in addition allocate memory on the heap, which
+ // is expensive. an alternative would be to split the vector into
+ // chunks of, say, 128 elements, convert a chunk at a time and set it
+ // in the output vector using VecSetValues. since 128 elements is
+ // small enough, this could easily be allocated on the stack (as a
+ // local variable) which would make the whole thing much more
+ // efficient.
+ //
+ // a second way to make things faster is for the special case that
+ // number==PetscScalar. we could then declare a specialization of
+ // this template, and omit the conversion. the problem with this is
+ // that the best we can do is to use VecSetValues, but this isn't
+ // very efficient either: it wants to see an array of indices, which
+ // in this case a) again takes up a whole lot of memory on the heap,
+ // and b) is totally dumb since its content would simply be the
+ // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
+ // function in PETSc that would take a pointer to an array of
+ // PetscScalar values and simply copy n elements verbatim into the
+ // vector...
+ for (size_type i = 0; i < v.size(); ++i)
+ (*this)(i) = v(i);
+
+ compress(::dealii::VectorOperation::insert);
+
+ return *this;
+ }
+
+
+
+ inline const MPI_Comm &
+ Vector::get_mpi_communicator() const
+ {
+ return communicator;
+ }
+
+# endif // DOXYGEN
+ } // namespace MPI
+} // namespace PETScWrappers
+
+namespace internal
+{
+ namespace LinearOperatorImplementation
+ {
+ template <typename>
+ class ReinitHelper;
+
+ /**
+ * A helper class used internally in linear_operator.h. Specialization for
+ * PETScWrappers::MPI::Vector.
+ */
+ template <>
+ class ReinitHelper<PETScWrappers::MPI::Vector>
+ {
+ public:
+ template <typename Matrix>
+ static void
+ reinit_range_vector(const Matrix & matrix,
+ PETScWrappers::MPI::Vector &v,
+ bool /*omit_zeroing_entries*/)
+ {
+ v.reinit(matrix.locally_owned_range_indices(),
+ matrix.get_mpi_communicator());
+ }
+
+ template <typename Matrix>
+ static void
+ reinit_domain_vector(const Matrix & matrix,
+ PETScWrappers::MPI::Vector &v,
+ bool /*omit_zeroing_entries*/)
+ {
+ v.reinit(matrix.locally_owned_domain_indices(),
+ matrix.get_mpi_communicator());
+ }
+ };
+
+ } // namespace LinearOperatorImplementation
+} /* namespace internal */
+
+/**@}*/
+
+
+/**
+ * Declare dealii::PETScWrappers::MPI::Vector as distributed vector.
+ *
+ * @author Uwe Koecher, 2017
+ */
+template <>
+struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
+{};
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+# endif // DEAL_II_WITH_PETSC
+
+#endif
+/*------------------------- petsc_vector.h -------------------------*/
#include <boost/io/ios_state.hpp>
#ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_vector.h>
#endif
#ifdef DEAL_II_WITH_TRILINOS
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/grid/tria_iterator.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/matrix_tools.h>
#ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-# include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_sparse_matrix.h>
# include <deal.II/lac/petsc_sparse_matrix.h>
+# include <deal.II/lac/petsc_vector.h>
#endif
#ifdef DEAL_II_WITH_TRILINOS
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
# include <deal.II/base/mpi.h>
# include <deal.II/base/parameter_handler.h>
# ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# endif
# include <deal.II/lac/vector.h>
# include <deal.II/lac/vector_memory.h>
# endif
# ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# endif
DEAL_II_NAMESPACE_OPEN
# include <deal.II/base/mpi.h>
# include <deal.II/base/parameter_handler.h>
# ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# endif
# include <deal.II/lac/vector.h>
# include <deal.II/lac/vector_memory.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#endif
#ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# include <petscsys.h>
#endif
#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/block_vector.h>
# include <deal.II/lac/la_parallel_block_vector.h>
# include <deal.II/lac/la_parallel_vector.h>
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/trilinos_parallel_block_vector.h>
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_matrix_array.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/vector.h>
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_compatibility.h>
# include <deal.II/lac/petsc_full_matrix.h>
-# include <deal.II/lac/petsc_parallel_sparse_matrix.h>
# include <deal.II/lac/petsc_sparse_matrix.h>
# include <deal.II/lac/petsc_vector_base.h>
//
// ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
#ifdef DEAL_II_WITH_PETSC
//
// ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#ifdef DEAL_II_WITH_PETSC
//
// ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/dynamic_sparsity_pattern.h>
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_compatibility.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/sparsity_pattern.h>
DEAL_II_NAMESPACE_OPEN
#include <deal.II/base/mpi.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_compatibility.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# include <cmath>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/block_sparse_matrix.h>
#include <deal.II/lac/block_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/matrix_tools.h>
#ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-# include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_sparse_matrix.h>
# include <deal.II/lac/petsc_sparse_matrix.h>
+# include <deal.II/lac/petsc_vector.h>
#endif
#ifdef DEAL_II_WITH_TRILINOS
#include <deal.II/numerics/matrix_tools.h>
#ifdef DEAL_II_WITH_PETSC
+# include <deal.II/lac/petsc_block_sparse_matrix.h>
+# include <deal.II/lac/petsc_block_vector.h>
# include <deal.II/lac/petsc_matrix_base.h>
-# include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-# include <deal.II/lac/petsc_parallel_block_vector.h>
# include <deal.II/lac/petsc_vector_base.h>
#endif
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
# include <deal.II/lac/trilinos_vector.h>
# endif
# ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# endif
# include <deal.II/base/utilities.h>
# include <deal.II/lac/trilinos_vector.h>
# endif
# ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# endif
# include <deal.II/base/utilities.h>
# include <deal.II/lac/trilinos_vector.h>
# endif
# ifdef DEAL_II_WITH_PETSC
-# include <deal.II/lac/petsc_parallel_block_vector.h>
-# include <deal.II/lac/petsc_parallel_vector.h>
+# include <deal.II/lac/petsc_block_vector.h>
+# include <deal.II/lac/petsc_vector.h>
# endif
# include <deal.II/base/utilities.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/parpack_solver.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/parpack_solver.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/hp/q_collection.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/hp/q_collection.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/generic_linear_algebra.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
#include <deal.II/hp/q_collection.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include "../tests.h"
// Vectors:
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
// Block Matrix and Vectors:
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_vector.h>
using namespace dealii;
// 3.98974 > 3.95906 > 3.90828 > 3.83792
#include <deal.II/lac/petsc_compatibility.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/utilities.h>
#include <deal.II/base/mpi.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include "../tests.h"
// check is_serial_vector type trait
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/lac/filtered_matrix.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/identity_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/filtered_matrix.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/identity_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/filtered_matrix.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/identity_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <iostream>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <iostream>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/grid/grid_generator.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/grid/grid_generator.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/grid/grid_tools.h>
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/data_out_faces.h>
// ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "fe_tools_extrapolate_common.h"
// ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include "fe_tools_extrapolate_common.h"
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_solver.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include "../tests.h"
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include "../tests.h"
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
// x_j's so that we can verify the correctness analytically
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <sstream>
// contiguous
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <sstream>
// like _01, but with an inhomogeneity
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <sstream>
// contiguous
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <sstream>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/multigrid/mg_constrained_dofs.h>
// check PETScWrappers::MPI::Vector::size()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
// check PETScWrappers::MPI::Vector::operator() in set-mode
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator() in add-mode
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::l1_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::l2_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::linfty_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator *=
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator /=
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
// orthogonal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
// not orthogonal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// this function has since been removed, so we test for v=0 instead, although
// that may be covered by one of the other tests
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator = (PetscScalar) with setting to a
// nonzero value
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator = (Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator = (Vector), except that we don't
// resize the vector to be copied to beforehand
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::reinit(fast)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::reinit(!fast)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::l2_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::mean_value()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::lp_norm(3)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::all_zero
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator+=(Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator-=(Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::add (scalar)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::add(scalar, Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::add(s,V,s,V)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::sadd(s, Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::sadd(scalar, scalar, Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::scale
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::equ (s,V)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::ratio
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator = (Vector<PetscVector>)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// check PETScWrappers::MPI::Vector::operator = (Vector<T>) with T!=PetscScalar
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// check copy constructor PETScWrappers::MPI::Vector::Vector(Vector)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator() in set, and later in *= mode
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator() in set, and later in /= mode
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::is_non_zero
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check ::Vector (const PETScWrappers::MPI::Vector &) copy constructor
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// check PETScWrappers::MPI::Vector (const ::Vector &) copy constructor
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// check ::Vector::operator = (const PETScWrappers::MPI::Vector &)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// check PETScWrappers::MPI::Vector::operator = (const ::Vector &)
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// PETScWrappers::MatrixBase::operator=
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/vector.h>
// This test used to fail after upgrading to petsc 2.2.1
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PetscScalar
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../tests.h"
// make sure that block vector iterator allows reading and writing correctly
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <iostream>
// like _01, except that we use operator[] instead of operator*
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <iostream>
// this test is an adaptation of lac/block_vector_iterator for PETSc block
// vectors
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include <algorithm>
#include <iostream>
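// Illustrative sketch, not part of the patch: reading and writing a
// PETScWrappers::MPI::BlockVector through its iterators, as the block vector
// iterator tests above do. The block layout and values are made up; a single
// MPI rank is assumed, and MPI/PETSc are assumed to be initialized in main().
#include <deal.II/lac/petsc_block_vector.h>

void block_vector_iterator_sketch()
{
  using namespace dealii;

  // two blocks of size 5, everything locally owned
  PETScWrappers::MPI::BlockVector v(2, MPI_COMM_WORLD, 5, 5);

  unsigned int k = 0;
  for (PETScWrappers::MPI::BlockVector::iterator it = v.begin(); it != v.end(); ++it)
    *it = k++; // write through the iterator (the second test uses it[offset] instead)
  v.compress(VectorOperation::insert);

  PetscScalar sum = 0;
  for (PETScWrappers::MPI::BlockVector::iterator it = v.begin(); it != v.end(); ++it)
    sum += *it; // read back through the iterator
  (void)sum;
}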
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include "../tests.h"
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// test the CG solver using the PETSc matrix and vector classes
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/solver_cg.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver.h>
#include <deal.II/lac/solver_bicgstab.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver.h>
#include <deal.II/lac/solver_bicgstab.h>
// test the MINRES solver using the PETSc matrix and vector classes
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/solver_cg.h>
// test the QMRS solver using the PETSc matrix and vector classes
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/solver_cg.h>
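// Illustrative sketch, not part of the patch: using deal.II's own Krylov
// solvers with the PETSc matrix and vector wrappers, which is roughly what
// the CG/BiCGSTAB/MINRES/QMRS tests above do (SolverCG stands in for the
// other solver classes). The matrix, sizes, and tolerances are made up, and a
// single MPI rank with initialized MPI/PETSc is assumed.
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>

void dealii_cg_with_petsc_sketch()
{
  using namespace dealii;
  const unsigned int n = 32;

  // a simple tridiagonal test matrix
  PETScWrappers::SparseMatrix A(n, n, 3);
  for (unsigned int i = 0; i < n; ++i)
    {
      A.set(i, i, 2.0);
      if (i > 0)
        A.set(i, i - 1, -1.0);
      if (i < n - 1)
        A.set(i, i + 1, -1.0);
    }
  A.compress(VectorOperation::insert);

  PETScWrappers::MPI::Vector x(MPI_COMM_WORLD, n, n);
  PETScWrappers::MPI::Vector b(MPI_COMM_WORLD, n, n);
  b = 1.0;

  SolverControl control(100, 1e-10);
  SolverCG<PETScWrappers::MPI::Vector> cg(control);
  cg.solve(A, x, b, PreconditionIdentity());
}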
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
// check FullMatrix::vmult
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check FullMatrix::Tvmult
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check FullMatrix::vmult_add
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check FullMatrix::Tvmult_add
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check FullMatrix::matrix_scalar_product
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check FullMatrix::matrix_norm_square
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check FullMatrix::matrix_norm_square
#include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
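// Illustrative sketch, not part of the patch: the operations the FullMatrix
// tests above check -- filling a dense PETSc matrix and applying it to a
// vector. The (rows, columns) constructor and the sizes used here are
// assumptions made for the sketch; a single MPI rank is assumed.
#include <deal.II/lac/petsc_full_matrix.h>
#include <deal.II/lac/petsc_vector.h>

void full_matrix_vmult_sketch()
{
  using namespace dealii;
  const unsigned int n = 4;

  PETScWrappers::FullMatrix M(n, n);
  for (unsigned int i = 0; i < n; ++i)
    for (unsigned int j = 0; j < n; ++j)
      M.set(i, j, 1.0 * (i + j));
  M.compress(VectorOperation::insert);

  PETScWrappers::MPI::Vector v(MPI_COMM_WORLD, n, n);
  PETScWrappers::MPI::Vector w(MPI_COMM_WORLD, n, n);
  v = 1.0;

  M.vmult(w, v);                                       // w = M v
  const PetscScalar s = M.matrix_scalar_product(w, v); // w^T M v
  (void)s;
}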
#include <deal.II/base/index_set.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
#include <iostream>
#include <vector>
// malloc calls have been performed
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
#include "../tests.h"
#include <deal.II/base/index_set.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
#include <iostream>
#include <vector>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
//
// the tests build the 5-point stencil matrix for a uniform grid of size N*N
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <iostream>
//
// the tests build the 5-point stencil matrix for a uniform grid of size N*N
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <iostream>
// matrix in a consecutive fashion, but rather according to the order of
// degrees of freedom in the sequence of cells that we traverse
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <iostream>
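// Illustrative sketch, not part of the patch: assembling the 5-point-stencil
// test matrix through a DynamicSparsityPattern and applying a PETSc
// preconditioner, similar in spirit to the tests above. The grid size N is
// made up; a single MPI rank with initialized MPI/PETSc is assumed.
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>

void five_point_stencil_sketch()
{
  using namespace dealii;
  const unsigned int N = 8, n = N * N;

  // sparsity pattern of the 5-point stencil on an N x N grid
  DynamicSparsityPattern dsp(n, n);
  for (unsigned int i = 0; i < N; ++i)
    for (unsigned int j = 0; j < N; ++j)
      {
        const unsigned int row = i * N + j;
        dsp.add(row, row);
        if (i > 0)
          dsp.add(row, row - N);
        if (i < N - 1)
          dsp.add(row, row + N);
        if (j > 0)
          dsp.add(row, row - 1);
        if (j < N - 1)
          dsp.add(row, row + 1);
      }

  PETScWrappers::SparseMatrix A;
  A.reinit(dsp);
  for (unsigned int i = 0; i < N; ++i)
    for (unsigned int j = 0; j < N; ++j)
      {
        const unsigned int row = i * N + j;
        A.set(row, row, 4.0);
        if (i > 0)
          A.set(row, row - N, -1.0);
        if (i < N - 1)
          A.set(row, row + N, -1.0);
        if (j > 0)
          A.set(row, row - 1, -1.0);
        if (j < N - 1)
          A.set(row, row + 1, -1.0);
      }
  A.compress(VectorOperation::insert);

  // apply one of the wrapped PETSc preconditioners to a vector
  PETScWrappers::PreconditionJacobi jacobi(A);
  PETScWrappers::MPI::Vector u(MPI_COMM_WORLD, n, n), v(MPI_COMM_WORLD, n, n);
  u = 1.0;
  jacobi.vmult(v, u);
}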
// test the PETSc Richardson solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../testmatrix.h"
#include "../tests.h"
// test the PETSc Chebychev solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../testmatrix.h"
#include "../tests.h"
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver with PETSc MatrixFree class
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// preconditioner
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// matrix
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// preconditioner. This should converge in exactly one iteration
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc BiCG solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc GMRES solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc Bicgstab solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CGS solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc TFQMR solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc CR solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc LSQR solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../testmatrix.h"
#include "../tests.h"
// test the PETSc PreOnly solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETScWrapper::Precondition*::vmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// test the PETSc SparseDirectMumps solver
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
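// Illustrative sketch, not part of the patch: the common pattern behind the
// PETSc solver tests above -- a PETScWrappers solver combined with a
// PETScWrappers preconditioner, applied to a PETSc matrix and vectors. The
// matrix A is assumed to have been assembled elsewhere (e.g. as in the
// stencil sketch further up); solver type, preconditioner, and tolerances are
// placeholders for the many variants tested.
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_control.h>

void petsc_solver_sketch(const dealii::PETScWrappers::SparseMatrix &A)
{
  using namespace dealii;
  const auto n = A.m();

  PETScWrappers::MPI::Vector x(MPI_COMM_WORLD, n, n);
  PETScWrappers::MPI::Vector b(MPI_COMM_WORLD, n, n);
  b = 1.0;

  SolverControl control(1000, 1e-10);
  PETScWrappers::SolverCG           solver(control);         // or SolverGMRES, SolverBicgstab, ...
  PETScWrappers::PreconditionJacobi preconditioner(A);       // or another wrapped preconditioner

  solver.solve(A, x, b, preconditioner);
}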
// check SparseMatrix::mmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::Tmmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::mmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::Tmmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::vmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::Tvmult
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::vmult_add
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::Tvmult_add
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::matrix_scalar_product
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::matrix_norm_square
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check SparseMatrix::matrix_norm_square
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
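// Illustrative sketch, not part of the patch: the matrix-vector operations
// checked by the SparseMatrix tests above, for a square matrix A assembled
// elsewhere. A single MPI rank and a real PetscScalar are assumed.
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>

void sparse_matrix_products_sketch(const dealii::PETScWrappers::SparseMatrix &A)
{
  using namespace dealii;
  const auto n = A.m();

  PETScWrappers::MPI::Vector v(MPI_COMM_WORLD, n, n);
  PETScWrappers::MPI::Vector w(MPI_COMM_WORLD, n, n);
  v = 1.0;

  A.vmult(w, v);     // w  = A v
  A.Tvmult(w, v);    // w  = A^T v
  A.vmult_add(w, v); // w += A v
  const PetscScalar norm2 = A.matrix_norm_square(v);       // v^T A v
  const PetscScalar prod  = A.matrix_scalar_product(w, v); // w^T A v
  (void)norm2;
  (void)prod;
}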
// check VectorTools::subtract_mean_value() for PETSc vectors
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/numerics/vector_tools.h>
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
#include "../tests.h"
//
// this was fixed 2004-04-05, and this test checks that it works
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// argument to the user-defined operator+=. This is not exciting, but since I
// wrote the test to make sure it works this way, let's keep it then...
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are not equal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are equal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator!=(PETScWrappers::MPI::Vector)
// for vectors that are not equal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator!=(PETScWrappers::MPI::Vector)
// for vectors that are equal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// verify that VectorBase::print uses the precision parameter correctly and
// restores the previous value of the stream precision
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// Test the constructor PETScWrappers::VectorBase(const Vec &) that takes an
// existing PETSc vector.
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::size()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
// check PETScWrappers::MPI::Vector::operator() in set-mode
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator() in add-mode
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::l1_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::l2_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::linfty_norm()
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
// not orthogonal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
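// Illustrative sketch, not part of the patch: element access in set and add
// mode, the norms, and the scalar product that the vector tests above check.
// Sizes are made up; a single MPI rank and a real PetscScalar are assumed.
#include <deal.II/lac/petsc_vector.h>

void vector_access_and_norms_sketch()
{
  using namespace dealii;
  const unsigned int n = 10;

  PETScWrappers::MPI::Vector v(MPI_COMM_WORLD, n, n);
  PETScWrappers::MPI::Vector w(MPI_COMM_WORLD, n, n);

  for (unsigned int i = 0; i < n; ++i)
    v(i) = i; // set mode
  v.compress(VectorOperation::insert);

  for (unsigned int i = 0; i < n; ++i)
    v(i) += 1.0; // add mode (do not mix set and add between compress calls)
  v.compress(VectorOperation::add);

  w = 1.0;
  const double      l1  = v.l1_norm();
  const double      l2  = v.l2_norm();
  const double      loo = v.linfty_norm();
  const PetscScalar dot = v * w; // operator*(Vector), the scalar product
  (void)l1;
  (void)l2;
  (void)loo;
  (void)dot;
}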
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <numeric>
// deal.II includes
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <cassert>
#include <complex>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include "../tests.h"
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
// Note: This is (almost) a clone of the tests/petsc/solver_01.cc
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../testmatrix.h"
#include "../tests.h"
// Note: This is (almost) a clone of the tests/petsc/solver_02.cc
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include "../testmatrix.h"
#include "../tests.h"
// Note: This is (almost) a clone of the tests/petsc/solver_03.cc
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// numbers to a possibly
// complex matrix where
// petsc-scalar=complex.
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// Note: This is (almost) a clone of the tests/petsc/solver_03.cc
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_memory.h>
#include <iostream>
// check assignment of elements in Vector
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector.h>
#include <iostream>
// See notes in petsc/vector_assign_01.cc
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// this is equivalent to the petsc_parallel_vector_assign_01 test, except that
// we use operator+= instead of operator=. This is also not exciting...
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are not equal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are equal
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// verify that VectorBase::print uses the precision parameter correctly and
// restores the previous value of the stream precision
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
// Test the constructor PETScWrappers::VectorBase(const Vec &) that takes an
// existing PETSc vector for complex values.
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <iostream>
#include <vector>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/lac/petsc_compatibility.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/lac/petsc_compatibility.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
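// Illustrative sketch, not part of the patch: the basic SLEPc call pattern
// behind the eigensolver tests above, for a symmetric PETSc matrix A
// assembled elsewhere. The solver type, the number of eigenpairs, and the
// tolerances are assumptions made for the sketch; a single MPI rank and a
// real PetscScalar are assumed.
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/solver_control.h>

void slepc_eigensolver_sketch(const dealii::PETScWrappers::SparseMatrix &A)
{
  using namespace dealii;
  const auto         n            = A.m();
  const unsigned int n_eigenpairs = 3;

  std::vector<double>                     eigenvalues(n_eigenpairs);
  std::vector<PETScWrappers::MPI::Vector> eigenvectors(n_eigenpairs);
  for (auto &v : eigenvectors)
    v.reinit(MPI_COMM_WORLD, n, n);

  SolverControl                    control(1000, 1e-9);
  SLEPcWrappers::SolverKrylovSchur eigensolver(control);
  eigensolver.set_which_eigenpairs(EPS_SMALLEST_REAL);
  eigensolver.solve(A, eigenvalues, eigenvectors, n_eigenpairs);
}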
#include <deal.II/base/mpi.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
#include <deal.II/sundials/copy.h>