https://gitweb.dealii.org/ - dealii.git/commitdiff
Rename petsc_parallel_* headers to petsc_*  (6986/head)
author    Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
          Thu, 26 Jul 2018 12:18:24 +0000 (14:18 +0200)
committer Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
          Thu, 26 Jul 2018 12:22:08 +0000 (14:22 +0200)
256 files changed:
include/deal.II/fe/fe_tools_extrapolate.templates.h
include/deal.II/fe/fe_tools_interpolate.templates.h
include/deal.II/lac/affine_constraints.templates.h
include/deal.II/lac/generic_linear_algebra.h
include/deal.II/lac/la_parallel_block_vector.templates.h
include/deal.II/lac/la_parallel_vector.templates.h
include/deal.II/lac/petsc_block_sparse_matrix.h [new file with mode: 0644]
include/deal.II/lac/petsc_block_vector.h [new file with mode: 0644]
include/deal.II/lac/petsc_matrix_free.h
include/deal.II/lac/petsc_parallel_block_sparse_matrix.h
include/deal.II/lac/petsc_parallel_block_vector.h
include/deal.II/lac/petsc_parallel_sparse_matrix.h
include/deal.II/lac/petsc_parallel_vector.h
include/deal.II/lac/petsc_sparse_matrix.h
include/deal.II/lac/petsc_vector.h [new file with mode: 0644]
include/deal.II/lac/read_write_vector.templates.h
include/deal.II/multigrid/mg_transfer.h
include/deal.II/multigrid/mg_transfer.templates.h
include/deal.II/numerics/error_estimator.templates.h
include/deal.II/numerics/matrix_creator.templates.h
include/deal.II/numerics/vector_tools.templates.h
include/deal.II/sundials/arkode.h
include/deal.II/sundials/copy.h
include/deal.II/sundials/ida.h
source/algorithms/operator.cc
source/base/mpi.cc
source/base/time_stepping.cc
source/distributed/solution_transfer.cc
source/dofs/dof_accessor_get.cc
source/dofs/dof_accessor_set.cc
source/fe/fe_values.cc
source/fe/mapping_fe_field.cc
source/fe/mapping_q1_eulerian.cc
source/fe/mapping_q_eulerian.cc
source/lac/block_matrix_array.cc
source/lac/petsc_matrix_base.cc
source/lac/petsc_parallel_block_sparse_matrix.cc
source/lac/petsc_parallel_block_vector.cc
source/lac/petsc_parallel_sparse_matrix.cc
source/lac/petsc_parallel_vector.cc
source/lac/petsc_vector_base.cc
source/lac/solver.cc
source/lac/vector_memory.cc
source/meshworker/mesh_worker_vector_selector.cc
source/multigrid/mg_base.cc
source/multigrid/multigrid.cc
source/non_matching/coupling.cc
source/numerics/data_out_dof_data.cc
source/numerics/data_out_dof_data_codim.cc
source/numerics/data_out_dof_data_inst2.cc
source/numerics/derivative_approximation.cc
source/numerics/dof_output_operator.cc
source/numerics/error_estimator_1d.cc
source/numerics/fe_field_function.cc
source/numerics/matrix_tools.cc
source/numerics/matrix_tools_once.cc
source/numerics/point_value_history.cc
source/numerics/solution_transfer.cc
source/sundials/arkode.cc
source/sundials/ida.cc
source/sundials/kinsol.cc
tests/arpack/parpack_advection_diffusion_petsc.cc
tests/arpack/step-36_parpack.cc
tests/fe/fe_enriched_step-36.cc
tests/fe/fe_enriched_step-36b.cc
tests/gla/extract_subvector_to.cc
tests/hp/hp_constraints_neither_dominate_01.cc
tests/hp/hp_constraints_neither_dominate_02.cc
tests/lac/linear_operator_09.cc
tests/lac/utilities_01.cc
tests/lac/vector_reinit_03.cc
tests/lac/vector_type_traits_is_serial_03.cc
tests/mappings/mapping_q_eulerian_07.cc
tests/mappings/mapping_q_eulerian_08.cc
tests/matrix_free/interpolate_to_mg.cc
tests/mpi/blockvec_01.cc
tests/mpi/blockvec_02.cc
tests/mpi/condense_01.cc
tests/mpi/constraint_matrix_condense_01.cc
tests/mpi/constraint_matrix_set_zero_01.cc
tests/mpi/data_out_faces_01.cc
tests/mpi/fe_tools_extrapolate_02.cc
tests/mpi/fe_tools_extrapolate_05.cc
tests/mpi/ghost_01.cc
tests/mpi/ghost_02.cc
tests/mpi/ghost_03.cc
tests/mpi/has_hanging_nodes.cc
tests/mpi/hp_step-40.cc
tests/mpi/interpolate_02.cc
tests/mpi/interpolate_04.cc
tests/mpi/p4est_save_01.cc
tests/mpi/p4est_save_02.cc
tests/mpi/p4est_save_03.cc
tests/mpi/p4est_save_04.cc
tests/mpi/periodicity_01.cc
tests/mpi/petsc_01.cc
tests/mpi/petsc_02.cc
tests/mpi/petsc_03.cc
tests/mpi/petsc_bug_ghost_vector_01.cc
tests/mpi/petsc_distribute_01.cc
tests/mpi/petsc_distribute_01_block.cc
tests/mpi/petsc_distribute_01_inhomogenous.cc
tests/mpi/petsc_locally_owned_elements.cc
tests/mpi/solution_transfer_01.cc
tests/mpi/step-40.cc
tests/mpi/step-40_cuthill_mckee.cc
tests/mpi/step-40_cuthill_mckee_MPI-subset.cc
tests/mpi/step-40_direct_solver.cc
tests/multigrid/transfer_04b.cc
tests/petsc/11.cc
tests/petsc/12.cc
tests/petsc/13.cc
tests/petsc/17.cc
tests/petsc/18.cc
tests/petsc/19.cc
tests/petsc/20.cc
tests/petsc/21.cc
tests/petsc/22.cc
tests/petsc/23.cc
tests/petsc/24.cc
tests/petsc/26.cc
tests/petsc/27.cc
tests/petsc/28.cc
tests/petsc/29.cc
tests/petsc/30.cc
tests/petsc/31.cc
tests/petsc/32.cc
tests/petsc/33.cc
tests/petsc/34.cc
tests/petsc/35.cc
tests/petsc/36.cc
tests/petsc/37.cc
tests/petsc/39.cc
tests/petsc/40.cc
tests/petsc/41.cc
tests/petsc/42.cc
tests/petsc/45.cc
tests/petsc/46.cc
tests/petsc/48.cc
tests/petsc/49.cc
tests/petsc/50.cc
tests/petsc/51.cc
tests/petsc/55.cc
tests/petsc/56.cc
tests/petsc/57.cc
tests/petsc/58.cc
tests/petsc/59.cc
tests/petsc/60.cc
tests/petsc/61.cc
tests/petsc/64.cc
tests/petsc/65.cc
tests/petsc/70.cc
tests/petsc/block_vector_iterator_01.cc
tests/petsc/block_vector_iterator_02.cc
tests/petsc/block_vector_iterator_03.cc
tests/petsc/copy_parallel_vector.cc
tests/petsc/copy_to_dealvec.cc
tests/petsc/copy_to_dealvec_block.cc
tests/petsc/deal_solver_01.cc
tests/petsc/deal_solver_02.cc
tests/petsc/deal_solver_03.cc
tests/petsc/deal_solver_04.cc
tests/petsc/deal_solver_05.cc
tests/petsc/different_matrix_preconditioner.cc
tests/petsc/full_matrix_vector_01.cc
tests/petsc/full_matrix_vector_02.cc
tests/petsc/full_matrix_vector_03.cc
tests/petsc/full_matrix_vector_04.cc
tests/petsc/full_matrix_vector_05.cc
tests/petsc/full_matrix_vector_06.cc
tests/petsc/full_matrix_vector_07.cc
tests/petsc/iterate_parallel_01.cc
tests/petsc/parallel_sparse_matrix_01.cc
tests/petsc/reinit_preconditioner_01.cc
tests/petsc/reinit_preconditioner_02.cc
tests/petsc/slowness_02.cc
tests/petsc/slowness_03.cc
tests/petsc/slowness_04.cc
tests/petsc/solver_01.cc
tests/petsc/solver_02.cc
tests/petsc/solver_03.cc
tests/petsc/solver_03_mf.cc
tests/petsc/solver_03_precondition_boomeramg.cc
tests/petsc/solver_03_precondition_boomeramg_symmetric.cc
tests/petsc/solver_03_precondition_eisenstat.cc
tests/petsc/solver_03_precondition_icc.cc
tests/petsc/solver_03_precondition_ilu.cc
tests/petsc/solver_03_precondition_lu.cc
tests/petsc/solver_03_precondition_parasails.cc
tests/petsc/solver_03_precondition_sor.cc
tests/petsc/solver_03_precondition_ssor.cc
tests/petsc/solver_04.cc
tests/petsc/solver_05.cc
tests/petsc/solver_06.cc
tests/petsc/solver_07.cc
tests/petsc/solver_08.cc
tests/petsc/solver_09.cc
tests/petsc/solver_10.cc
tests/petsc/solver_11.cc
tests/petsc/solver_12.cc
tests/petsc/solver_13.cc
tests/petsc/sparse_direct_mumps.cc
tests/petsc/sparse_matrix_matrix_01.cc
tests/petsc/sparse_matrix_matrix_02.cc
tests/petsc/sparse_matrix_matrix_03.cc
tests/petsc/sparse_matrix_matrix_04.cc
tests/petsc/sparse_matrix_vector_01.cc
tests/petsc/sparse_matrix_vector_02.cc
tests/petsc/sparse_matrix_vector_03.cc
tests/petsc/sparse_matrix_vector_04.cc
tests/petsc/sparse_matrix_vector_05.cc
tests/petsc/sparse_matrix_vector_06.cc
tests/petsc/sparse_matrix_vector_07.cc
tests/petsc/subtract_mean_value_03.cc
tests/petsc/update_ghosts.cc
tests/petsc/vector_assign_01.cc
tests/petsc/vector_assign_02.cc
tests/petsc/vector_equality_1.cc
tests/petsc/vector_equality_2.cc
tests/petsc/vector_equality_3.cc
tests/petsc/vector_equality_4.cc
tests/petsc/vector_print.cc
tests/petsc/vector_wrap_01.cc
tests/petsc_complex/11.cc
tests/petsc_complex/12.cc
tests/petsc_complex/13.cc
tests/petsc_complex/17.cc
tests/petsc_complex/18.cc
tests/petsc_complex/19.cc
tests/petsc_complex/20.cc
tests/petsc_complex/assemble_01.cc
tests/petsc_complex/element_access_00.cc
tests/petsc_complex/fe_get_function_values.cc
tests/petsc_complex/parallel_sparse_matrix_01.cc
tests/petsc_complex/solver_real_01.cc
tests/petsc_complex/solver_real_02.cc
tests/petsc_complex/solver_real_03.cc
tests/petsc_complex/solver_real_03_mf.cc
tests/petsc_complex/solver_real_04.cc
tests/petsc_complex/vector_02.cc
tests/petsc_complex/vector_assign_01.cc
tests/petsc_complex/vector_assign_02.cc
tests/petsc_complex/vector_equality_1.cc
tests/petsc_complex/vector_equality_2.cc
tests/petsc_complex/vector_print.cc
tests/petsc_complex/vector_wrap_01.cc
tests/physics/step-18-rotation_matrix.cc
tests/physics/step-18.cc
tests/quick_tests/step-petsc.cc
tests/quick_tests/step-slepc.cc
tests/slepc/solve_01.cc
tests/slepc/solve_04.cc
tests/slepc/step-36_parallel.cc
tests/slepc/step-36_parallel_02.cc
tests/slepc/step-36_parallel_03.cc
tests/sundials/copy_01.cc
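
The practical effect of this commit for downstream code is a pure header rename: the
PETSc wrapper classes keep their names and namespaces, only the include paths lose
the "parallel_" part, and the old petsc_parallel_* headers are kept as deprecation
stubs that forward to the new ones (see, e.g., the petsc_parallel_block_sparse_matrix.h
hunk below). A minimal sketch of the change in hypothetical user code:

    // before this commit
    #include <deal.II/lac/petsc_parallel_sparse_matrix.h>
    #include <deal.II/lac/petsc_parallel_vector.h>
    #include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
    #include <deal.II/lac/petsc_parallel_block_vector.h>

    // after this commit
    #include <deal.II/lac/petsc_sparse_matrix.h>
    #include <deal.II/lac/petsc_vector.h>
    #include <deal.II/lac/petsc_block_sparse_matrix.h>
    #include <deal.II/lac/petsc_block_vector.h>

    // the classes themselves are unchanged, e.g.
    dealii::PETScWrappers::MPI::SparseMatrix system_matrix;
    dealii::PETScWrappers::MPI::Vector       solution;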

index 56daaa1e7186032b7592dd2f4b79d6761e3b0767..01492c650f02dbb61c35f8a822b4d9f7a4a7106c 100644 (file)
@@ -33,8 +33,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 
index 296a5cda473c636eac4683dec38a4083284da923..715cc6ce6934bf3563ebadf40dd0d99d9a58a978 100644 (file)
@@ -43,8 +43,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 291a7ee420b44ea7d3a2616d5e6f7df90cef4cff..40080fdd715441465cde9181be2f39fda28a229d 100644 (file)
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
 #include <deal.II/lac/matrix_block.h>
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/sparse_matrix_ez.h>
 #include <deal.II/lac/sparsity_pattern.h>
index 5187bb79a6576d9b12de61312841b17aa96065a7..b4f2a8e1c678cc42cf081f1ad7e1c47bba1302b0 100644 (file)
@@ -50,10 +50,10 @@ DEAL_II_NAMESPACE_CLOSE
 #ifdef DEAL_II_WITH_PETSC
 
 #  include <deal.II/lac/block_sparsity_pattern.h>
-#  include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#  include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#  include <deal.II/lac/petsc_block_sparse_matrix.h>
 #  include <deal.II/lac/petsc_precondition.h>
 #  include <deal.II/lac/petsc_solver.h>
+#  include <deal.II/lac/petsc_sparse_matrix.h>
 
 DEAL_II_NAMESPACE_OPEN
 
index d2567d59d3d19a26358e78e6912ac6ca8b94f1bc..fecbee6055f25922e87b42be8cec30158f70b000 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/lapack_support.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/vector.h>
 
index 44a45b188948d5ed829ce1dcf7f1757511061873..d4d233d820e8686c6e6643b7720d81df27f9a0e4 100644 (file)
@@ -23,7 +23,7 @@
 
 #include <deal.II/lac/exceptions.h>
 #include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/read_write_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector_operations_internal.h>
diff --git a/include/deal.II/lac/petsc_block_sparse_matrix.h b/include/deal.II/lac/petsc_block_sparse_matrix.h
new file mode 100644 (file)
index 0000000..9688daf
--- /dev/null
@@ -0,0 +1,361 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_block_sparse_matrix_h
+#define dealii_petsc_block_sparse_matrix_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_PETSC
+
+#  include <deal.II/base/table.h>
+
+#  include <deal.II/lac/block_matrix_base.h>
+#  include <deal.II/lac/block_sparsity_pattern.h>
+#  include <deal.II/lac/exceptions.h>
+#  include <deal.II/lac/petsc_block_vector.h>
+#  include <deal.II/lac/petsc_sparse_matrix.h>
+
+#  include <cmath>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+namespace PETScWrappers
+{
+  namespace MPI
+  {
+    /*! @addtogroup PETScWrappers
+     *@{
+     */
+
+    /**
+     * Blocked sparse matrix based on the PETScWrappers::MPI::SparseMatrix
+     * class. This class implements the functions that are specific to the
+     * PETSc SparseMatrix base objects for a blocked sparse matrix, and leaves
+     * the actual work relaying most of the calls to the individual blocks to
+     * the functions implemented in the base class. See there also for a
+     * description of when this class is useful.
+     *
+     * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices
+     * do not have external objects for the sparsity patterns. Thus, one does
+     * not determine the size of the individual blocks of a block matrix of
+     * this type by attaching a block sparsity pattern, but by calling
+     * reinit() to set the number of blocks and then by setting the size of
+     * each block separately. In order to fix the data structures of the block
+     * matrix, it is then necessary to let it know that we have changed the
+     * sizes of the underlying matrices. For this, one has to call the
+     * collect_sizes() function, for much the same reason as is documented
+     * with the BlockSparsityPattern class.
+     *
+     * @ingroup Matrix1 @see
+     * @ref GlossBlockLA "Block (linear algebra)"
+     * @author Wolfgang Bangerth, 2004
+     */
+    class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
+    {
+    public:
+      /**
+       * Typedef the base class for simpler access to its own alias.
+       */
+      using BaseClass = BlockMatrixBase<SparseMatrix>;
+
+      /**
+       * Typedef the type of the underlying matrix.
+       */
+      using BlockType = BaseClass::BlockType;
+
+      /**
+       * Import the alias from the base class.
+       */
+      using value_type      = BaseClass::value_type;
+      using pointer         = BaseClass::pointer;
+      using const_pointer   = BaseClass::const_pointer;
+      using reference       = BaseClass::reference;
+      using const_reference = BaseClass::const_reference;
+      using size_type       = BaseClass::size_type;
+      using iterator        = BaseClass::iterator;
+      using const_iterator  = BaseClass::const_iterator;
+
+      /**
+       * Constructor; initializes the matrix to be empty, without any
+       * structure, i.e.  the matrix is not usable at all. This constructor is
+       * therefore only useful for matrices which are members of a class. All
+       * other matrices should be created at a point in the data flow where
+       * all necessary information is available.
+       *
+       * You have to initialize the matrix before usage with
+       * reinit(BlockSparsityPattern). The number of blocks per row and column
+       * are then determined by that function.
+       */
+      BlockSparseMatrix() = default;
+
+      /**
+       * Destructor.
+       */
+      ~BlockSparseMatrix() override = default;
+
+      /**
+       * Pseudo copy operator only copying empty objects. The sizes of the
+       * block matrices need to be the same.
+       */
+      BlockSparseMatrix &
+      operator=(const BlockSparseMatrix &);
+
+      /**
+       * This operator assigns a scalar to a matrix. Since this does usually
+       * not make much sense (should we set all matrix entries to this value?
+       * Only the nonzero entries of the sparsity pattern?), this operation is
+       * only allowed if the actual value to be assigned is zero. This
+       * operator only exists to allow for the obvious notation
+       * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
+       * keep the sparsity pattern previously used.
+       */
+      BlockSparseMatrix &
+      operator=(const double d);
+
+      /**
+       * Resize the matrix, by setting the number of block rows and columns.
+       * This deletes all blocks and replaces them with uninitialized ones,
+       * i.e.  ones for which also the sizes are not yet set. You have to do
+       * that by calling the @p reinit functions of the blocks themselves. Do
+       * not forget to call collect_sizes() after that on this object.
+       *
+       * The reason that you have to set sizes of the blocks yourself is that
+       * the sizes may be varying, the maximum number of elements per row may
+       * be varying, etc. It is simpler not to reproduce the interface of the
+       * SparsityPattern class here but rather let the user call whatever
+       * function she desires.
+       */
+      void
+      reinit(const size_type n_block_rows, const size_type n_block_columns);
+
+
+      /**
+       * Efficiently reinit the block matrix for a parallel computation. Only
+       * the BlockSparsityPattern of the Simple type can efficiently store
+       * large sparsity patterns in parallel, so this is the only supported
+       * argument. The IndexSets describe the locally owned range of DoFs for
+       * each block. Note that the IndexSets needs to be ascending and 1:1.
+       * For a symmetric structure hand in the same vector for the first two
+       * arguments.
+       */
+      void
+      reinit(const std::vector<IndexSet> &      rows,
+             const std::vector<IndexSet> &      cols,
+             const BlockDynamicSparsityPattern &bdsp,
+             const MPI_Comm &                   com);
+
+
+      /**
+       * Same as above but for a symmetric structure only.
+       */
+      void
+      reinit(const std::vector<IndexSet> &      sizes,
+             const BlockDynamicSparsityPattern &bdsp,
+             const MPI_Comm &                   com);
+
+
+
+      /**
+       * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this
+       * matrix.
+       */
+      void
+      vmult(BlockVector &dst, const BlockVector &src) const;
+
+      /**
+       * Matrix-vector multiplication. Just like the previous function, but
+       * only applicable if the matrix has only one block column.
+       */
+      void
+      vmult(BlockVector &dst, const Vector &src) const;
+
+      /**
+       * Matrix-vector multiplication. Just like the previous function, but
+       * only applicable if the matrix has only one block row.
+       */
+      void
+      vmult(Vector &dst, const BlockVector &src) const;
+
+      /**
+       * Matrix-vector multiplication. Just like the previous function, but
+       * only applicable if the matrix has only one block.
+       */
+      void
+      vmult(Vector &dst, const Vector &src) const;
+
+      /**
+       * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this
+       * matrix. This function does the same as vmult() but takes the
+       * transposed matrix.
+       */
+      void
+      Tvmult(BlockVector &dst, const BlockVector &src) const;
+
+      /**
+       * Matrix-vector multiplication. Just like the previous function, but
+       * only applicable if the matrix has only one block row.
+       */
+      void
+      Tvmult(BlockVector &dst, const Vector &src) const;
+
+      /**
+       * Matrix-vector multiplication. Just like the previous function, but
+       * only applicable if the matrix has only one block column.
+       */
+      void
+      Tvmult(Vector &dst, const BlockVector &src) const;
+
+      /**
+       * Matrix-vector multiplication. Just like the previous function, but
+       * only applicable if the matrix has only one block.
+       */
+      void
+      Tvmult(Vector &dst, const Vector &src) const;
+
+      /**
+       * This function collects the sizes of the sub-objects and stores them
+       * in internal arrays, in order to be able to relay global indices into
+       * the matrix to indices into the subobjects. You *must* call this
+       * function each time after you have changed the size of the sub-
+       * objects.
+       */
+      void
+      collect_sizes();
+
+      /**
+       * Return the partitioning of the domain space of this matrix, i.e., the
+       * partitioning of the vectors this matrix has to be multiplied with.
+       */
+      std::vector<IndexSet>
+      locally_owned_domain_indices() const;
+
+      /**
+       * Return the partitioning of the range space of this matrix, i.e., the
+       * partitioning of the vectors that are result from matrix-vector
+       * products.
+       */
+      std::vector<IndexSet>
+      locally_owned_range_indices() const;
+
+      /**
+       * Return a reference to the MPI communicator object in use with this
+       * matrix.
+       */
+      const MPI_Comm &
+      get_mpi_communicator() const;
+
+      /**
+       * Make the clear() function in the base class visible, though it is
+       * protected.
+       */
+      using BlockMatrixBase<SparseMatrix>::clear;
+    };
+
+
+
+    /*@}*/
+
+    // ------------- inline and template functions -----------------
+
+    inline BlockSparseMatrix &
+    BlockSparseMatrix::operator=(const double d)
+    {
+      Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
+
+      for (size_type r = 0; r < this->n_block_rows(); ++r)
+        for (size_type c = 0; c < this->n_block_cols(); ++c)
+          this->block(r, c) = d;
+
+      return *this;
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::vmult(BlockVector &dst, const BlockVector &src) const
+    {
+      BaseClass::vmult_block_block(dst, src);
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::vmult(BlockVector &dst, const Vector &src) const
+    {
+      BaseClass::vmult_block_nonblock(dst, src);
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::vmult(Vector &dst, const BlockVector &src) const
+    {
+      BaseClass::vmult_nonblock_block(dst, src);
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::vmult(Vector &dst, const Vector &src) const
+    {
+      BaseClass::vmult_nonblock_nonblock(dst, src);
+    }
+
+
+    inline void
+    BlockSparseMatrix::Tvmult(BlockVector &dst, const BlockVector &src) const
+    {
+      BaseClass::Tvmult_block_block(dst, src);
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::Tvmult(BlockVector &dst, const Vector &src) const
+    {
+      BaseClass::Tvmult_block_nonblock(dst, src);
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::Tvmult(Vector &dst, const BlockVector &src) const
+    {
+      BaseClass::Tvmult_nonblock_block(dst, src);
+    }
+
+
+
+    inline void
+    BlockSparseMatrix::Tvmult(Vector &dst, const Vector &src) const
+    {
+      BaseClass::Tvmult_nonblock_nonblock(dst, src);
+    }
+
+  } // namespace MPI
+
+} // namespace PETScWrappers
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+
+#endif // DEAL_II_WITH_PETSC
+
+#endif // dealii_petsc_block_sparse_matrix_h
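
The class documentation in the new header above spells out the intended workflow for
this blocked matrix: set up the block partitioning via one of the IndexSet-based
reinit() functions (or via reinit(n_block_rows, n_block_columns) followed by per-block
reinit() and collect_sizes()), then use it like any other matrix. A minimal sketch,
assuming hypothetical objects `owned_partitioning` (a std::vector<IndexSet> with one
ascending, 1:1 entry per block), `bdsp` (a BlockDynamicSparsityPattern built on that
partitioning), and `mpi_communicator`:

    #include <deal.II/lac/petsc_block_sparse_matrix.h>
    #include <deal.II/lac/petsc_block_vector.h>

    // hypothetical helper; the partitioning and sparsity objects are assumed to exist
    void setup_and_multiply(const std::vector<dealii::IndexSet> &      owned_partitioning,
                            const dealii::BlockDynamicSparsityPattern &bdsp,
                            const MPI_Comm &                           mpi_communicator)
    {
      dealii::PETScWrappers::MPI::BlockSparseMatrix system_matrix;
      // symmetric structure: the same partitioning is used for rows and columns
      system_matrix.reinit(owned_partitioning, bdsp, mpi_communicator);

      dealii::PETScWrappers::MPI::BlockVector src(owned_partitioning, mpi_communicator);
      dealii::PETScWrappers::MPI::BlockVector dst(owned_partitioning, mpi_communicator);

      // ... assemble system_matrix and fill src ...

      system_matrix.vmult(dst, src); // block matrix-vector product, dst = M * src
    }
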
diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h
new file mode 100644 (file)
index 0000000..9420e4d
--- /dev/null
@@ -0,0 +1,564 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_block_vector_h
+#define dealii_petsc_block_vector_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_PETSC
+
+#  include <deal.II/lac/block_indices.h>
+#  include <deal.II/lac/block_vector_base.h>
+#  include <deal.II/lac/exceptions.h>
+#  include <deal.II/lac/petsc_vector.h>
+#  include <deal.II/lac/vector_type_traits.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace PETScWrappers
+{
+  // forward declaration
+  class BlockVector;
+
+  namespace MPI
+  {
+    /*! @addtogroup PETScWrappers
+     *@{
+     */
+
+    /**
+     * An implementation of block vectors based on the parallel vector class
+     * implemented in PETScWrappers. While the base class provides for most of
+     * the interface, this class handles the actual allocation of vectors and
+     * provides functions that are specific to the underlying vector type.
+     *
+     * The model of distribution of data is such that each of the blocks is
+     * distributed across all MPI processes named in the MPI communicator.
+     * I.e. we don't just distribute the whole vector, but each component. In
+     * the constructors and reinit() functions, one therefore not only has to
+     * specify the sizes of the individual blocks, but also the number of
+     * elements of each of these blocks to be stored on the local process.
+     *
+     * @ingroup Vectors @see
+     * @ref GlossBlockLA "Block (linear algebra)"
+     * @author Wolfgang Bangerth, 2004
+     */
+    class BlockVector : public BlockVectorBase<Vector>
+    {
+    public:
+      /**
+       * Typedef the base class for simpler access to its own alias.
+       */
+      using BaseClass = BlockVectorBase<Vector>;
+
+      /**
+       * Typedef the type of the underlying vector.
+       */
+      using BlockType = BaseClass::BlockType;
+
+      /**
+       * Import the alias from the base class.
+       */
+      using value_type      = BaseClass::value_type;
+      using pointer         = BaseClass::pointer;
+      using const_pointer   = BaseClass::const_pointer;
+      using reference       = BaseClass::reference;
+      using const_reference = BaseClass::const_reference;
+      using size_type       = BaseClass::size_type;
+      using iterator        = BaseClass::iterator;
+      using const_iterator  = BaseClass::const_iterator;
+
+      /**
+       * Default constructor. Generate an empty vector without any blocks.
+       */
+      BlockVector() = default;
+
+      /**
+       * Constructor. Generate a block vector with @p n_blocks blocks, each of
+       * which is a parallel vector across @p communicator with @p block_size
+       * elements of which @p local_size elements are stored on the present
+       * process.
+       */
+      explicit BlockVector(const unsigned int n_blocks,
+                           const MPI_Comm &   communicator,
+                           const size_type    block_size,
+                           const size_type    local_size);
+
+      /**
+       * Copy constructor. Set all the properties of the parallel vector to
+       * those of the given argument and copy the elements.
+       */
+      BlockVector(const BlockVector &V);
+
+      /**
+       * Constructor. Set the number of blocks to <tt>block_sizes.size()</tt>
+       * and initialize each block with <tt>block_sizes[i]</tt> zero elements.
+       * The individual blocks are distributed across the given communicator,
+       * and each store <tt>local_elements[i]</tt> elements on the present
+       * process.
+       */
+      BlockVector(const std::vector<size_type> &block_sizes,
+                  const MPI_Comm &              communicator,
+                  const std::vector<size_type> &local_elements);
+
+      /**
+       * Create a BlockVector with parallel_partitioning.size() blocks, each
+       * initialized with the given IndexSet.
+       */
+      explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
+                           const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+      /**
+       * Same as above, but include ghost elements
+       */
+      BlockVector(const std::vector<IndexSet> &parallel_partitioning,
+                  const std::vector<IndexSet> &ghost_indices,
+                  const MPI_Comm &             communicator);
+
+
+
+      /**
+       * Destructor. Clears memory
+       */
+      ~BlockVector() override = default;
+
+      /**
+       * Copy operator: fill all components of the vector that are locally
+       * stored with the given scalar value.
+       */
+      BlockVector &
+      operator=(const value_type s);
+
+      /**
+       * Copy operator for arguments of the same type.
+       */
+      BlockVector &
+      operator=(const BlockVector &V);
+
+      /**
+       * Reinitialize the BlockVector to contain @p n_blocks of size @p
+       * block_size, each of which stores @p local_size elements locally. The
+       * @p communicator argument denotes which MPI channel each of these
+       * blocks shall communicate.
+       *
+       * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
+       * zeros.
+       */
+      void
+      reinit(const unsigned int n_blocks,
+             const MPI_Comm &   communicator,
+             const size_type    block_size,
+             const size_type    local_size,
+             const bool         omit_zeroing_entries = false);
+
+      /**
+       * Reinitialize the BlockVector such that it contains
+       * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
+       * dimension <tt>block_sizes[i]</tt>. Each of them stores
+       * <tt>local_sizes[i]</tt> elements on the present process.
+       *
+       * If the number of blocks is the same as before this function was
+       * called, all vectors remain the same and reinit() is called for each
+       * vector.
+       *
+       * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
+       * zeros.
+       *
+       * Note that you must call this (or the other reinit() functions)
+       * function, rather than calling the reinit() functions of an individual
+       * block, to allow the block vector to update its caches of vector
+       * sizes. If you call reinit() of one of the blocks, then subsequent
+       * actions on this object may yield unpredictable results since they may
+       * be routed to the wrong block.
+       */
+      void
+      reinit(const std::vector<size_type> &block_sizes,
+             const MPI_Comm &              communicator,
+             const std::vector<size_type> &local_sizes,
+             const bool                    omit_zeroing_entries = false);
+
+      /**
+       * Change the dimension to that of the vector <tt>V</tt>. The same
+       * applies as for the other reinit() function.
+       *
+       * The elements of <tt>V</tt> are not copied, i.e.  this function is the
+       * same as calling <tt>reinit (V.size(), omit_zeroing_entries)</tt>.
+       *
+       * Note that you must call this (or the other reinit() functions)
+       * function, rather than calling the reinit() functions of an individual
+       * block, to allow the block vector to update its caches of vector
+       * sizes. If you call reinit() on one of the blocks, then subsequent
+       * actions on this object may yield unpredictable results since they may
+       * be routed to the wrong block.
+       */
+      void
+      reinit(const BlockVector &V, const bool omit_zeroing_entries = false);
+
+      /**
+       * Reinitialize the BlockVector using IndexSets. See the constructor
+       * with the same arguments for details.
+       */
+      void
+      reinit(const std::vector<IndexSet> &parallel_partitioning,
+             const MPI_Comm &             communicator);
+
+      /**
+       * Same as above but include ghost entries.
+       */
+      void
+      reinit(const std::vector<IndexSet> &parallel_partitioning,
+             const std::vector<IndexSet> &ghost_entries,
+             const MPI_Comm &             communicator);
+
+      /**
+       * Change the number of blocks to <tt>num_blocks</tt>. The individual
+       * blocks will get initialized with zero size, so it is assumed that the
+       * user resizes the individual blocks by herself in an appropriate way,
+       * and calls <tt>collect_sizes</tt> afterwards.
+       */
+      void
+      reinit(const unsigned int num_blocks);
+
+      /**
+       * Return if this vector is a ghosted vector (and thus read-only).
+       */
+      bool
+      has_ghost_elements() const;
+
+      /**
+       * Return a reference to the MPI communicator object in use with this
+       * vector.
+       */
+      const MPI_Comm &
+      get_mpi_communicator() const;
+
+      /**
+       * Swap the contents of this vector and the other vector <tt>v</tt>. One
+       * could do this operation with a temporary variable and copying over
+       * the data elements, but this function is significantly more efficient
+       * since it only swaps the pointers to the data of the two vectors and
+       * therefore does not need to allocate temporary storage and move data
+       * around.
+       *
+       * Limitation: right now this function only works if both vectors have
+       * the same number of blocks. If needed, the numbers of blocks should be
+       * exchanged, too.
+       *
+       * This function is analogous to the swap() function of all C++
+       * standard containers. Also, there is a global function swap(u,v) that
+       * simply calls <tt>u.swap(v)</tt>, again in analogy to standard
+       * functions.
+       */
+      void
+      swap(BlockVector &v);
+
+      /**
+       * Print to a stream.
+       */
+      void
+      print(std::ostream &     out,
+            const unsigned int precision  = 3,
+            const bool         scientific = true,
+            const bool         across     = true) const;
+
+      /**
+       * Exception
+       */
+      DeclException0(ExcIteratorRangeDoesNotMatchVectorSize);
+      /**
+       * Exception
+       */
+      DeclException0(ExcNonMatchingBlockVectors);
+    };
+
+    /*@}*/
+
+    /*--------------------- Inline functions --------------------------------*/
+
+    inline BlockVector::BlockVector(const unsigned int n_blocks,
+                                    const MPI_Comm &   communicator,
+                                    const size_type    block_size,
+                                    const size_type    local_size)
+    {
+      reinit(n_blocks, communicator, block_size, local_size);
+    }
+
+
+
+    inline BlockVector::BlockVector(
+      const std::vector<size_type> &block_sizes,
+      const MPI_Comm &              communicator,
+      const std::vector<size_type> &local_elements)
+    {
+      reinit(block_sizes, communicator, local_elements, false);
+    }
+
+
+    inline BlockVector::BlockVector(const BlockVector &v)
+      : BlockVectorBase<Vector>()
+    {
+      this->components.resize(v.n_blocks());
+      this->block_indices = v.block_indices;
+
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        this->components[i] = v.components[i];
+    }
+
+    inline BlockVector::BlockVector(
+      const std::vector<IndexSet> &parallel_partitioning,
+      const MPI_Comm &             communicator)
+    {
+      reinit(parallel_partitioning, communicator);
+    }
+
+    inline BlockVector::BlockVector(
+      const std::vector<IndexSet> &parallel_partitioning,
+      const std::vector<IndexSet> &ghost_indices,
+      const MPI_Comm &             communicator)
+    {
+      reinit(parallel_partitioning, ghost_indices, communicator);
+    }
+
+    inline BlockVector &
+    BlockVector::operator=(const value_type s)
+    {
+      BaseClass::operator=(s);
+      return *this;
+    }
+
+    inline BlockVector &
+    BlockVector::operator=(const BlockVector &v)
+    {
+      // we only allow assignment to vectors with the same number of blocks
+      // or to an empty BlockVector
+      Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
+             ExcDimensionMismatch(n_blocks(), v.n_blocks()));
+
+      if (this->n_blocks() != v.n_blocks())
+        reinit(v.n_blocks());
+
+      for (size_type i = 0; i < this->n_blocks(); ++i)
+        this->components[i] = v.block(i);
+
+      collect_sizes();
+
+      return *this;
+    }
+
+
+
+    inline void
+    BlockVector::reinit(const unsigned int n_blocks,
+                        const MPI_Comm &   communicator,
+                        const size_type    block_size,
+                        const size_type    local_size,
+                        const bool         omit_zeroing_entries)
+    {
+      reinit(std::vector<size_type>(n_blocks, block_size),
+             communicator,
+             std::vector<size_type>(n_blocks, local_size),
+             omit_zeroing_entries);
+    }
+
+
+
+    inline void
+    BlockVector::reinit(const std::vector<size_type> &block_sizes,
+                        const MPI_Comm &              communicator,
+                        const std::vector<size_type> &local_sizes,
+                        const bool                    omit_zeroing_entries)
+    {
+      this->block_indices.reinit(block_sizes);
+      if (this->components.size() != this->n_blocks())
+        this->components.resize(this->n_blocks());
+
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        this->components[i].reinit(communicator,
+                                   block_sizes[i],
+                                   local_sizes[i],
+                                   omit_zeroing_entries);
+    }
+
+
+    inline void
+    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
+    {
+      this->block_indices = v.get_block_indices();
+      if (this->components.size() != this->n_blocks())
+        this->components.resize(this->n_blocks());
+
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        block(i).reinit(v.block(i), omit_zeroing_entries);
+    }
+
+    inline void
+    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
+                        const MPI_Comm &             communicator)
+    {
+      std::vector<size_type> sizes(parallel_partitioning.size());
+      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
+        sizes[i] = parallel_partitioning[i].size();
+
+      this->block_indices.reinit(sizes);
+      if (this->components.size() != this->n_blocks())
+        this->components.resize(this->n_blocks());
+
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        block(i).reinit(parallel_partitioning[i], communicator);
+    }
+
+    inline void
+    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
+                        const std::vector<IndexSet> &ghost_entries,
+                        const MPI_Comm &             communicator)
+    {
+      std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
+      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
+        sizes[i] = parallel_partitioning[i].size();
+
+      this->block_indices.reinit(sizes);
+      if (this->components.size() != this->n_blocks())
+        this->components.resize(this->n_blocks());
+
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        block(i).reinit(parallel_partitioning[i],
+                        ghost_entries[i],
+                        communicator);
+    }
+
+
+
+    inline const MPI_Comm &
+    BlockVector::get_mpi_communicator() const
+    {
+      return block(0).get_mpi_communicator();
+    }
+
+    inline bool
+    BlockVector::has_ghost_elements() const
+    {
+      bool ghosted = block(0).has_ghost_elements();
+#  ifdef DEBUG
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
+#  endif
+      return ghosted;
+    }
+
+
+    inline void
+    BlockVector::swap(BlockVector &v)
+    {
+      std::swap(this->components, v.components);
+
+      ::dealii::swap(this->block_indices, v.block_indices);
+    }
+
+
+
+    inline void
+    BlockVector::print(std::ostream &     out,
+                       const unsigned int precision,
+                       const bool         scientific,
+                       const bool         across) const
+    {
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        {
+          if (across)
+            out << 'C' << i << ':';
+          else
+            out << "Component " << i << std::endl;
+          this->components[i].print(out, precision, scientific, across);
+        }
+    }
+
+
+
+    /**
+     * Global function which overloads the default implementation of the C++
+     * standard library which uses a temporary object. The function simply
+     * exchanges the data of the two vectors.
+     *
+     * @relatesalso PETScWrappers::MPI::BlockVector
+     * @author Wolfgang Bangerth, 2000
+     */
+    inline void
+    swap(BlockVector &u, BlockVector &v)
+    {
+      u.swap(v);
+    }
+
+  } // namespace MPI
+
+} // namespace PETScWrappers
+
+namespace internal
+{
+  namespace LinearOperatorImplementation
+  {
+    template <typename>
+    class ReinitHelper;
+
+    /**
+     * A helper class used internally in linear_operator.h. Specialization for
+     * PETScWrappers::MPI::BlockVector.
+     */
+    template <>
+    class ReinitHelper<PETScWrappers::MPI::BlockVector>
+    {
+    public:
+      template <typename Matrix>
+      static void
+      reinit_range_vector(const Matrix &                   matrix,
+                          PETScWrappers::MPI::BlockVector &v,
+                          bool /*omit_zeroing_entries*/)
+      {
+        v.reinit(matrix.locally_owned_range_indices(),
+                 matrix.get_mpi_communicator());
+      }
+
+      template <typename Matrix>
+      static void
+      reinit_domain_vector(const Matrix &                   matrix,
+                           PETScWrappers::MPI::BlockVector &v,
+                           bool /*omit_zeroing_entries*/)
+      {
+        v.reinit(matrix.locally_owned_domain_indices(),
+                 matrix.get_mpi_communicator());
+      }
+    };
+
+  } // namespace LinearOperatorImplementation
+} /* namespace internal */
+
+
+/**
+ * Declare dealii::PETScWrappers::MPI::BlockVector as distributed vector.
+ *
+ * @author Uwe Koecher, 2017
+ */
+template <>
+struct is_serial_vector<PETScWrappers::MPI::BlockVector> : std::false_type
+{};
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_WITH_PETSC
+
+#endif
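
As the class documentation in the new header above notes, each block of this vector is
itself distributed across the MPI communicator, and a vector can be created either with
just its locally owned partitioning (writable) or additionally with ghost entries
(read-only). A minimal sketch, assuming hypothetical objects `owned_partitioning` and
`ghost_partitioning` (each a std::vector<IndexSet> with one entry per block) and
`mpi_communicator`:

    #include <deal.II/lac/petsc_block_vector.h>

    // hypothetical helper; the partitioning vectors are assumed to have one
    // IndexSet per block of the finite element system
    void make_vectors(const std::vector<dealii::IndexSet> &owned_partitioning,
                      const std::vector<dealii::IndexSet> &ghost_partitioning,
                      const MPI_Comm &                     mpi_communicator)
    {
      // writable vector: stores only the locally owned entries of each block
      dealii::PETScWrappers::MPI::BlockVector owned_vec(owned_partitioning,
                                                        mpi_communicator);
      // ghosted vector: additionally stores read-only copies of the ghost entries
      dealii::PETScWrappers::MPI::BlockVector ghosted_vec(owned_partitioning,
                                                          ghost_partitioning,
                                                          mpi_communicator);

      owned_vec   = 1.0;       // fill the locally owned entries
      ghosted_vec = owned_vec; // copying also imports the ghost values

      Assert(ghosted_vec.has_ghost_elements(), dealii::ExcInternalError());
    }
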
index ff3aa5c44caeaf10657c85ed2c8b0a7e7960a12a..2b41841a025b5debf971126726d103f7daedacd3 100644 (file)
@@ -22,7 +22,7 @@
 #  ifdef DEAL_II_WITH_PETSC
 #    include <deal.II/lac/exceptions.h>
 #    include <deal.II/lac/petsc_matrix_base.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 DEAL_II_NAMESPACE_OPEN
 
 
index 4502c924c152eccc1880f13df20bde17745490fa..617a95a8591bad21a1bae9bb1dd03da97d761fc7 100644 (file)
 
 #include <deal.II/base/config.h>
 
-#ifdef DEAL_II_WITH_PETSC
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
 
-#  include <deal.II/base/table.h>
-
-#  include <deal.II/lac/block_matrix_base.h>
-#  include <deal.II/lac/block_sparsity_pattern.h>
-#  include <deal.II/lac/exceptions.h>
-#  include <deal.II/lac/petsc_parallel_block_vector.h>
-#  include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-
-#  include <cmath>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-
-namespace PETScWrappers
-{
-  namespace MPI
-  {
-    /*! @addtogroup PETScWrappers
-     *@{
-     */
-
-    /**
-     * Blocked sparse matrix based on the PETScWrappers::MPI::SparseMatrix
-     * class. This class implements the functions that are specific to the
-     * PETSc SparseMatrix base objects for a blocked sparse matrix, and leaves
-     * the actual work relaying most of the calls to the individual blocks to
-     * the functions implemented in the base class. See there also for a
-     * description of when this class is useful.
-     *
-     * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices
-     * do not have external objects for the sparsity patterns. Thus, one does
-     * not determine the size of the individual blocks of a block matrix of
-     * this type by attaching a block sparsity pattern, but by calling
-     * reinit() to set the number of blocks and then by setting the size of
-     * each block separately. In order to fix the data structures of the block
-     * matrix, it is then necessary to let it know that we have changed the
-     * sizes of the underlying matrices. For this, one has to call the
-     * collect_sizes() function, for much the same reason as is documented
-     * with the BlockSparsityPattern class.
-     *
-     * @ingroup Matrix1 @see
-     * @ref GlossBlockLA "Block (linear algebra)"
-     * @author Wolfgang Bangerth, 2004
-     */
-    class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
-    {
-    public:
-      /**
-       * Typedef the base class for simpler access to its own alias.
-       */
-      using BaseClass = BlockMatrixBase<SparseMatrix>;
-
-      /**
-       * Typedef the type of the underlying matrix.
-       */
-      using BlockType = BaseClass::BlockType;
-
-      /**
-       * Import the alias from the base class.
-       */
-      using value_type      = BaseClass::value_type;
-      using pointer         = BaseClass::pointer;
-      using const_pointer   = BaseClass::const_pointer;
-      using reference       = BaseClass::reference;
-      using const_reference = BaseClass::const_reference;
-      using size_type       = BaseClass::size_type;
-      using iterator        = BaseClass::iterator;
-      using const_iterator  = BaseClass::const_iterator;
-
-      /**
-       * Constructor; initializes the matrix to be empty, without any
-       * structure, i.e.  the matrix is not usable at all. This constructor is
-       * therefore only useful for matrices which are members of a class. All
-       * other matrices should be created at a point in the data flow where
-       * all necessary information is available.
-       *
-       * You have to initialize the matrix before usage with
-       * reinit(BlockSparsityPattern). The number of blocks per row and column
-       * are then determined by that function.
-       */
-      BlockSparseMatrix() = default;
-
-      /**
-       * Destructor.
-       */
-      ~BlockSparseMatrix() override = default;
-
-      /**
-       * Pseudo copy operator only copying empty objects. The sizes of the
-       * block matrices need to be the same.
-       */
-      BlockSparseMatrix &
-      operator=(const BlockSparseMatrix &);
-
-      /**
-       * This operator assigns a scalar to a matrix. Since this does usually
-       * not make much sense (should we set all matrix entries to this value?
-       * Only the nonzero entries of the sparsity pattern?), this operation is
-       * only allowed if the actual value to be assigned is zero. This
-       * operator only exists to allow for the obvious notation
-       * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
-       * keep the sparsity pattern previously used.
-       */
-      BlockSparseMatrix &
-      operator=(const double d);
-
-      /**
-       * Resize the matrix, by setting the number of block rows and columns.
-       * This deletes all blocks and replaces them with uninitialized ones,
-       * i.e.  ones for which also the sizes are not yet set. You have to do
-       * that by calling the @p reinit functions of the blocks themselves. Do
-       * not forget to call collect_sizes() after that on this object.
-       *
-       * The reason that you have to set sizes of the blocks yourself is that
-       * the sizes may be varying, the maximum number of elements per row may
-       * be varying, etc. It is simpler not to reproduce the interface of the
-       * SparsityPattern class here but rather let the user call whatever
-       * function she desires.
-       */
-      void
-      reinit(const size_type n_block_rows, const size_type n_block_columns);
-
-
-      /**
-       * Efficiently reinit the block matrix for a parallel computation. Only
-       * the BlockSparsityPattern of the Simple type can efficiently store
-       * large sparsity patterns in parallel, so this is the only supported
-       * argument. The IndexSets describe the locally owned range of DoFs for
-       * each block. Note that the IndexSets needs to be ascending and 1:1.
-       * For a symmetric structure hand in the same vector for the first two
-       * arguments.
-       */
-      void
-      reinit(const std::vector<IndexSet> &      rows,
-             const std::vector<IndexSet> &      cols,
-             const BlockDynamicSparsityPattern &bdsp,
-             const MPI_Comm &                   com);
-
-
-      /**
-       * Same as above but for a symmetric structure only.
-       */
-      void
-      reinit(const std::vector<IndexSet> &      sizes,
-             const BlockDynamicSparsityPattern &bdsp,
-             const MPI_Comm &                   com);
-
-
-
-      /**
-       * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this
-       * matrix.
-       */
-      void
-      vmult(BlockVector &dst, const BlockVector &src) const;
-
-      /**
-       * Matrix-vector multiplication. Just like the previous function, but
-       * only applicable if the matrix has only one block column.
-       */
-      void
-      vmult(BlockVector &dst, const Vector &src) const;
-
-      /**
-       * Matrix-vector multiplication. Just like the previous function, but
-       * only applicable if the matrix has only one block row.
-       */
-      void
-      vmult(Vector &dst, const BlockVector &src) const;
-
-      /**
-       * Matrix-vector multiplication. Just like the previous function, but
-       * only applicable if the matrix has only one block.
-       */
-      void
-      vmult(Vector &dst, const Vector &src) const;
-
-      /**
-       * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this
-       * matrix. This function does the same as vmult() but takes the
-       * transposed matrix.
-       */
-      void
-      Tvmult(BlockVector &dst, const BlockVector &src) const;
-
-      /**
-       * Matrix-vector multiplication. Just like the previous function, but
-       * only applicable if the matrix has only one block row.
-       */
-      void
-      Tvmult(BlockVector &dst, const Vector &src) const;
-
-      /**
-       * Matrix-vector multiplication. Just like the previous function, but
-       * only applicable if the matrix has only one block column.
-       */
-      void
-      Tvmult(Vector &dst, const BlockVector &src) const;
-
-      /**
-       * Matrix-vector multiplication. Just like the previous function, but
-       * only applicable if the matrix has only one block.
-       */
-      void
-      Tvmult(Vector &dst, const Vector &src) const;
-
-      /**
-       * This function collects the sizes of the sub-objects and stores them
-       * in internal arrays, in order to be able to relay global indices into
-       * the matrix to indices into the subobjects. You *must* call this
-       * function each time after you have changed the size of the sub-
-       * objects.
-       */
-      void
-      collect_sizes();
-
-      /**
-       * Return the partitioning of the domain space of this matrix, i.e., the
-       * partitioning of the vectors this matrix has to be multiplied with.
-       */
-      std::vector<IndexSet>
-      locally_owned_domain_indices() const;
-
-      /**
-       * Return the partitioning of the range space of this matrix, i.e., the
-       * partitioning of the vectors that are result from matrix-vector
-       * products.
-       */
-      std::vector<IndexSet>
-      locally_owned_range_indices() const;
-
-      /**
-       * Return a reference to the MPI communicator object in use with this
-       * matrix.
-       */
-      const MPI_Comm &
-      get_mpi_communicator() const;
-
-      /**
-       * Make the clear() function in the base class visible, though it is
-       * protected.
-       */
-      using BlockMatrixBase<SparseMatrix>::clear;
-    };
-
-
-
-    /*@}*/
-
-    // ------------- inline and template functions -----------------
-
-    inline BlockSparseMatrix &
-    BlockSparseMatrix::operator=(const double d)
-    {
-      Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
-
-      for (size_type r = 0; r < this->n_block_rows(); ++r)
-        for (size_type c = 0; c < this->n_block_cols(); ++c)
-          this->block(r, c) = d;
-
-      return *this;
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::vmult(BlockVector &dst, const BlockVector &src) const
-    {
-      BaseClass::vmult_block_block(dst, src);
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::vmult(BlockVector &dst, const Vector &src) const
-    {
-      BaseClass::vmult_block_nonblock(dst, src);
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::vmult(Vector &dst, const BlockVector &src) const
-    {
-      BaseClass::vmult_nonblock_block(dst, src);
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::vmult(Vector &dst, const Vector &src) const
-    {
-      BaseClass::vmult_nonblock_nonblock(dst, src);
-    }
-
-
-    inline void
-    BlockSparseMatrix::Tvmult(BlockVector &dst, const BlockVector &src) const
-    {
-      BaseClass::Tvmult_block_block(dst, src);
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::Tvmult(BlockVector &dst, const Vector &src) const
-    {
-      BaseClass::Tvmult_block_nonblock(dst, src);
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::Tvmult(Vector &dst, const BlockVector &src) const
-    {
-      BaseClass::Tvmult_nonblock_block(dst, src);
-    }
-
-
-
-    inline void
-    BlockSparseMatrix::Tvmult(Vector &dst, const Vector &src) const
-    {
-      BaseClass::Tvmult_nonblock_nonblock(dst, src);
-    }
-
-  } // namespace MPI
-
-} // namespace PETScWrappers
-
-
-DEAL_II_NAMESPACE_CLOSE
-
-
-#endif // DEAL_II_WITH_PETSC
+#pragma DEAL_II_WARNING( \
+  "This file is deprecated. Use deal.II/lac/petsc_block_sparse_matrix.h instead!")
 
 #endif // dealii_petsc_parallel_block_sparse_matrix_h
index 56206b91e88795982df466ee8c8868572c3cabdd..5a3ff99e34137e5ccfe34c09bf1a27422433bf23 100644 (file)
 
 #include <deal.II/base/config.h>
 
-#ifdef DEAL_II_WITH_PETSC
+#include <deal.II/lac/petsc_block_vector.h>
 
-#  include <deal.II/lac/block_indices.h>
-#  include <deal.II/lac/block_vector_base.h>
-#  include <deal.II/lac/exceptions.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
-#  include <deal.II/lac/vector_type_traits.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-namespace PETScWrappers
-{
-  // forward declaration
-  class BlockVector;
-
-  namespace MPI
-  {
-    /*! @addtogroup PETScWrappers
-     *@{
-     */
-
-    /**
-     * An implementation of block vectors based on the parallel vector class
-     * implemented in PETScWrappers. While the base class provides for most of
-     * the interface, this class handles the actual allocation of vectors and
-     * provides functions that are specific to the underlying vector type.
-     *
-     * The model of distribution of data is such that each of the blocks is
-     * distributed across all MPI processes named in the MPI communicator.
-     * I.e. we don't just distribute the whole vector, but each component. In
-     * the constructors and reinit() functions, one therefore not only has to
-     * specify the sizes of the individual blocks, but also the number of
-     * elements of each of these blocks to be stored on the local process.
-     *
-     * @ingroup Vectors @see
-     * @ref GlossBlockLA "Block (linear algebra)"
-     * @author Wolfgang Bangerth, 2004
-     */
-    class BlockVector : public BlockVectorBase<Vector>
-    {
-    public:
-      /**
-       * Typedef the base class for simpler access to its own alias.
-       */
-      using BaseClass = BlockVectorBase<Vector>;
-
-      /**
-       * Typedef the type of the underlying vector.
-       */
-      using BlockType = BaseClass::BlockType;
-
-      /**
-       * Import the alias from the base class.
-       */
-      using value_type      = BaseClass::value_type;
-      using pointer         = BaseClass::pointer;
-      using const_pointer   = BaseClass::const_pointer;
-      using reference       = BaseClass::reference;
-      using const_reference = BaseClass::const_reference;
-      using size_type       = BaseClass::size_type;
-      using iterator        = BaseClass::iterator;
-      using const_iterator  = BaseClass::const_iterator;
-
-      /**
-       * Default constructor. Generate an empty vector without any blocks.
-       */
-      BlockVector() = default;
-
-      /**
-       * Constructor. Generate a block vector with @p n_blocks blocks, each of
-       * which is a parallel vector across @p communicator with @p block_size
-       * elements of which @p local_size elements are stored on the present
-       * process.
-       */
-      explicit BlockVector(const unsigned int n_blocks,
-                           const MPI_Comm &   communicator,
-                           const size_type    block_size,
-                           const size_type    local_size);
-
-      /**
-       * Copy constructor. Set all the properties of the parallel vector to
-       * those of the given argument and copy the elements.
-       */
-      BlockVector(const BlockVector &V);
-
-      /**
-       * Constructor. Set the number of blocks to <tt>block_sizes.size()</tt>
-       * and initialize each block with <tt>block_sizes[i]</tt> zero elements.
-       * The individual blocks are distributed across the given communicator,
-       * and each stores <tt>local_elements[i]</tt> elements on the present
-       * process.
-       */
-      BlockVector(const std::vector<size_type> &block_sizes,
-                  const MPI_Comm &              communicator,
-                  const std::vector<size_type> &local_elements);
-
-      /**
-       * Create a BlockVector with parallel_partitioning.size() blocks, each
-       * initialized with the given IndexSet.
-       */
-      explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
-                           const MPI_Comm &communicator = MPI_COMM_WORLD);
-
-      /**
-       * Same as above, but include ghost elements
-       */
-      BlockVector(const std::vector<IndexSet> &parallel_partitioning,
-                  const std::vector<IndexSet> &ghost_indices,
-                  const MPI_Comm &             communicator);
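A minimal usage sketch of the IndexSet-based constructors declared above, assuming `locally_owned_u`, `locally_owned_p`, `relevant_u`, and `relevant_p` are suitably built IndexSets and MPI has been initialized:
@code
#include <deal.II/lac/petsc_block_vector.h>

std::vector<IndexSet> owned    = {locally_owned_u, locally_owned_p};
std::vector<IndexSet> relevant = {relevant_u, relevant_p};

// one block per IndexSet, all blocks distributed over MPI_COMM_WORLD
PETScWrappers::MPI::BlockVector solution(owned, MPI_COMM_WORLD);

// ghosted (read-only) variant that also stores the ghost entries
PETScWrappers::MPI::BlockVector ghosted(owned, relevant, MPI_COMM_WORLD);
@endcode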
-
-
-
-      /**
-       * Destructor. Clears memory
-       */
-      ~BlockVector() override = default;
-
-      /**
-       * Copy operator: fill all components of the vector that are locally
-       * stored with the given scalar value.
-       */
-      BlockVector &
-      operator=(const value_type s);
-
-      /**
-       * Copy operator for arguments of the same type.
-       */
-      BlockVector &
-      operator=(const BlockVector &V);
-
-      /**
-       * Reinitialize the BlockVector to contain @p n_blocks of size @p
-       * block_size, each of which stores @p local_size elements locally. The
-       * @p communicator argument denotes the MPI communicator over which
-       * each of these blocks shall communicate.
-       *
-       * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
-       * zeros.
-       */
-      void
-      reinit(const unsigned int n_blocks,
-             const MPI_Comm &   communicator,
-             const size_type    block_size,
-             const size_type    local_size,
-             const bool         omit_zeroing_entries = false);
-
-      /**
-       * Reinitialize the BlockVector such that it contains
-       * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
-       * dimension <tt>block_sizes[i]</tt>. Each of them stores
-       * <tt>local_sizes[i]</tt> elements on the present process.
-       *
-       * If the number of blocks is the same as before this function was
-       * called, all vectors remain the same and reinit() is called for each
-       * vector.
-       *
-       * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
-       * zeros.
-       *
-       * Note that you must call this function (or one of the other reinit()
-       * functions), rather than calling the reinit() functions of an
-       * individual block, to allow the block vector to update its caches of
-       * vector sizes. If you call reinit() on one of the blocks, then
-       * subsequent
-       * actions on this object may yield unpredictable results since they may
-       * be routed to the wrong block.
-       */
-      void
-      reinit(const std::vector<size_type> &block_sizes,
-             const MPI_Comm &              communicator,
-             const std::vector<size_type> &local_sizes,
-             const bool                    omit_zeroing_entries = false);
-
-      /**
-       * Change the dimension to that of the vector <tt>V</tt>. The same
-       * applies as for the other reinit() function.
-       *
-       * The elements of <tt>V</tt> are not copied, i.e. this function is the
-       * same as calling <tt>reinit(V.size(), omit_zeroing_entries)</tt>.
-       *
-       * Note that you must call this function (or one of the other reinit()
-       * functions), rather than calling the reinit() functions of an
-       * individual block, to allow the block vector to update its caches of
-       * vector sizes. If you call reinit() on one of the blocks, then
-       * subsequent
-       * actions on this object may yield unpredictable results since they may
-       * be routed to the wrong block.
-       */
-      void
-      reinit(const BlockVector &V, const bool omit_zeroing_entries = false);
-
-      /**
-       * Reinitialize the BlockVector using IndexSets. See the constructor
-       * with the same arguments for details.
-       */
-      void
-      reinit(const std::vector<IndexSet> &parallel_partitioning,
-             const MPI_Comm &             communicator);
-
-      /**
-       * Same as above but include ghost entries.
-       */
-      void
-      reinit(const std::vector<IndexSet> &parallel_partitioning,
-             const std::vector<IndexSet> &ghost_entries,
-             const MPI_Comm &             communicator);
-
-      /**
-       * Change the number of blocks to <tt>num_blocks</tt>. The individual
-       * blocks will get initialized with zero size, so it is assumed that the
-       * user resizes the individual blocks by herself in an appropriate way,
-       * and calls <tt>collect_sizes</tt> afterwards.
-       */
-      void
-      reinit(const unsigned int num_blocks);
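A minimal sketch of the workflow this reinit() variant implies -- resize the blocks individually, then call collect_sizes() -- assuming `owned_u` and `owned_p` are IndexSets describing the locally owned elements of each block:
@code
PETScWrappers::MPI::BlockVector v;
v.reinit(2);                                 // two blocks of zero size
v.block(0).reinit(owned_u, MPI_COMM_WORLD);  // resize each block individually
v.block(1).reinit(owned_p, MPI_COMM_WORLD);
v.collect_sizes();                           // update the cached block sizes
@endcode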
-
-      /**
-       * Return whether this vector is a ghosted vector (and thus read-only).
-       */
-      bool
-      has_ghost_elements() const;
-
-      /**
-       * Return a reference to the MPI communicator object in use with this
-       * vector.
-       */
-      const MPI_Comm &
-      get_mpi_communicator() const;
-
-      /**
-       * Swap the contents of this vector and the other vector <tt>v</tt>. One
-       * could do this operation with a temporary variable and copying over
-       * the data elements, but this function is significantly more efficient
-       * since it only swaps the pointers to the data of the two vectors and
-       * therefore does not need to allocate temporary storage and move data
-       * around.
-       *
-       * Limitation: right now this function only works if both vectors have
-       * the same number of blocks. If needed, the numbers of blocks should be
-       * exchanged, too.
-       *
-       * This function is analogous to the swap() function of all C++
-       * standard containers. Also, there is a global function swap(u,v) that
-       * simply calls <tt>u.swap(v)</tt>, again in analogy to standard
-       * functions.
-       */
-      void
-      swap(BlockVector &v);
-
-      /**
-       * Print to a stream.
-       */
-      void
-      print(std::ostream &     out,
-            const unsigned int precision  = 3,
-            const bool         scientific = true,
-            const bool         across     = true) const;
-
-      /**
-       * Exception
-       */
-      DeclException0(ExcIteratorRangeDoesNotMatchVectorSize);
-      /**
-       * Exception
-       */
-      DeclException0(ExcNonMatchingBlockVectors);
-    };
-
-    /*@}*/
-
-    /*--------------------- Inline functions --------------------------------*/
-
-    inline BlockVector::BlockVector(const unsigned int n_blocks,
-                                    const MPI_Comm &   communicator,
-                                    const size_type    block_size,
-                                    const size_type    local_size)
-    {
-      reinit(n_blocks, communicator, block_size, local_size);
-    }
-
-
-
-    inline BlockVector::BlockVector(
-      const std::vector<size_type> &block_sizes,
-      const MPI_Comm &              communicator,
-      const std::vector<size_type> &local_elements)
-    {
-      reinit(block_sizes, communicator, local_elements, false);
-    }
-
-
-    inline BlockVector::BlockVector(const BlockVector &v)
-      : BlockVectorBase<Vector>()
-    {
-      this->components.resize(v.n_blocks());
-      this->block_indices = v.block_indices;
-
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        this->components[i] = v.components[i];
-    }
-
-    inline BlockVector::BlockVector(
-      const std::vector<IndexSet> &parallel_partitioning,
-      const MPI_Comm &             communicator)
-    {
-      reinit(parallel_partitioning, communicator);
-    }
-
-    inline BlockVector::BlockVector(
-      const std::vector<IndexSet> &parallel_partitioning,
-      const std::vector<IndexSet> &ghost_indices,
-      const MPI_Comm &             communicator)
-    {
-      reinit(parallel_partitioning, ghost_indices, communicator);
-    }
-
-    inline BlockVector &
-    BlockVector::operator=(const value_type s)
-    {
-      BaseClass::operator=(s);
-      return *this;
-    }
-
-    inline BlockVector &
-    BlockVector::operator=(const BlockVector &v)
-    {
-      // we only allow assignment to vectors with the same number of blocks
-      // or to an empty BlockVector
-      Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
-             ExcDimensionMismatch(n_blocks(), v.n_blocks()));
-
-      if (this->n_blocks() != v.n_blocks())
-        reinit(v.n_blocks());
-
-      for (size_type i = 0; i < this->n_blocks(); ++i)
-        this->components[i] = v.block(i);
-
-      collect_sizes();
-
-      return *this;
-    }
-
-
-
-    inline void
-    BlockVector::reinit(const unsigned int n_blocks,
-                        const MPI_Comm &   communicator,
-                        const size_type    block_size,
-                        const size_type    local_size,
-                        const bool         omit_zeroing_entries)
-    {
-      reinit(std::vector<size_type>(n_blocks, block_size),
-             communicator,
-             std::vector<size_type>(n_blocks, local_size),
-             omit_zeroing_entries);
-    }
-
-
-
-    inline void
-    BlockVector::reinit(const std::vector<size_type> &block_sizes,
-                        const MPI_Comm &              communicator,
-                        const std::vector<size_type> &local_sizes,
-                        const bool                    omit_zeroing_entries)
-    {
-      this->block_indices.reinit(block_sizes);
-      if (this->components.size() != this->n_blocks())
-        this->components.resize(this->n_blocks());
-
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        this->components[i].reinit(communicator,
-                                   block_sizes[i],
-                                   local_sizes[i],
-                                   omit_zeroing_entries);
-    }
-
-
-    inline void
-    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
-    {
-      this->block_indices = v.get_block_indices();
-      if (this->components.size() != this->n_blocks())
-        this->components.resize(this->n_blocks());
-
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        block(i).reinit(v.block(i), omit_zeroing_entries);
-    }
-
-    inline void
-    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
-                        const MPI_Comm &             communicator)
-    {
-      std::vector<size_type> sizes(parallel_partitioning.size());
-      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
-        sizes[i] = parallel_partitioning[i].size();
-
-      this->block_indices.reinit(sizes);
-      if (this->components.size() != this->n_blocks())
-        this->components.resize(this->n_blocks());
-
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        block(i).reinit(parallel_partitioning[i], communicator);
-    }
-
-    inline void
-    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
-                        const std::vector<IndexSet> &ghost_entries,
-                        const MPI_Comm &             communicator)
-    {
-      std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
-      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
-        sizes[i] = parallel_partitioning[i].size();
-
-      this->block_indices.reinit(sizes);
-      if (this->components.size() != this->n_blocks())
-        this->components.resize(this->n_blocks());
-
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        block(i).reinit(parallel_partitioning[i],
-                        ghost_entries[i],
-                        communicator);
-    }
-
-
-
-    inline const MPI_Comm &
-    BlockVector::get_mpi_communicator() const
-    {
-      return block(0).get_mpi_communicator();
-    }
-
-    inline bool
-    BlockVector::has_ghost_elements() const
-    {
-      bool ghosted = block(0).has_ghost_elements();
-#  ifdef DEBUG
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
-#  endif
-      return ghosted;
-    }
-
-
-    inline void
-    BlockVector::swap(BlockVector &v)
-    {
-      std::swap(this->components, v.components);
-
-      ::dealii::swap(this->block_indices, v.block_indices);
-    }
-
-
-
-    inline void
-    BlockVector::print(std::ostream &     out,
-                       const unsigned int precision,
-                       const bool         scientific,
-                       const bool         across) const
-    {
-      for (unsigned int i = 0; i < this->n_blocks(); ++i)
-        {
-          if (across)
-            out << 'C' << i << ':';
-          else
-            out << "Component " << i << std::endl;
-          this->components[i].print(out, precision, scientific, across);
-        }
-    }
-
-
-
-    /**
-     * Global function that overloads the default implementation of the C++
-     * standard library, which uses a temporary object. The function simply
-     * exchanges the data of the two vectors.
-     *
-     * @relatesalso PETScWrappers::MPI::BlockVector
-     * @author Wolfgang Bangerth, 2000
-     */
-    inline void
-    swap(BlockVector &u, BlockVector &v)
-    {
-      u.swap(v);
-    }
-
-  } // namespace MPI
-
-} // namespace PETScWrappers
-
-namespace internal
-{
-  namespace LinearOperatorImplementation
-  {
-    template <typename>
-    class ReinitHelper;
-
-    /**
-     * A helper class used internally in linear_operator.h. Specialization for
-     * PETScWrappers::MPI::BlockVector.
-     */
-    template <>
-    class ReinitHelper<PETScWrappers::MPI::BlockVector>
-    {
-    public:
-      template <typename Matrix>
-      static void
-      reinit_range_vector(const Matrix &                   matrix,
-                          PETScWrappers::MPI::BlockVector &v,
-                          bool /*omit_zeroing_entries*/)
-      {
-        v.reinit(matrix.locally_owned_range_indices(),
-                 matrix.get_mpi_communicator());
-      }
-
-      template <typename Matrix>
-      static void
-      reinit_domain_vector(const Matrix &                   matrix,
-                           PETScWrappers::MPI::BlockVector &v,
-                           bool /*omit_zeroing_entries*/)
-      {
-        v.reinit(matrix.locally_owned_domain_indices(),
-                 matrix.get_mpi_communicator());
-      }
-    };
-
-  } // namespace LinearOperatorImplementation
-} /* namespace internal */
-
-
-/**
- * Declare dealii::PETScWrappers::MPI::BlockVector as a distributed vector.
- *
- * @author Uwe Koecher, 2017
- */
-template <>
-struct is_serial_vector<PETScWrappers::MPI::BlockVector> : std::false_type
-{};
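A minimal sketch of how this trait can be queried at compile time:
@code
static_assert(is_serial_vector<PETScWrappers::MPI::BlockVector>::value == false,
              "PETSc MPI block vectors are distributed vectors");
@endcode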
-
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif // DEAL_II_WITH_PETSC
+#pragma DEAL_II_WARNING( \
+  "This file is deprecated. Use deal.II/lac/petsc_block_vector.h instead!")
 
 #endif
index f193d02373c50d0e60c6ac0760134ce5078e8a1c..559d6c385cd47fff31dfbc4d85af7b8a9c6fa148 100644 (file)
 
 #  include <deal.II/base/config.h>
 
-#  ifdef DEAL_II_WITH_PETSC
+#  include <deal.II/lac/petsc_sparse_matrix.h>
 
-#    include <deal.II/lac/exceptions.h>
-#    include <deal.II/lac/petsc_matrix_base.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
-
-#    include <vector>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-// forward declaration
-template <typename Matrix>
-class BlockMatrixBase;
-
-
-namespace PETScWrappers
-{
-  namespace MPI
-  {
-    /**
-     * Implementation of a parallel sparse matrix class based on PETSc, with
-     * rows of the matrix distributed across an MPI network. All the
-     * functionality is actually in the base class, except for the calls to
-     * generate a parallel sparse matrix. This is possible since PETSc only
-     * works on an abstract matrix type and internally dispatches to
-     * functions that do the actual work depending on the actual matrix type
-     * (much like using virtual functions). Only the functions creating a
-     * matrix of specific type differ, and are implemented in this particular
-     * class.
-     *
-     * There are a number of comments on the communication model as well as
-     * access to individual elements in the documentation of the parallel
-     * vector class. These comments apply here as well.
-     *
-     *
-     * <h3>Partitioning of matrices</h3>
-     *
-     * PETSc partitions parallel matrices so that each MPI process "owns" a
-     * certain number of rows (i.e. only this process stores the respective
-     * entries in these rows). The number of rows each process owns has to be
-     * passed to the constructors and reinit() functions via the argument @p
-     * local_rows. The individual values passed as @p local_rows on all the
-     * MPI processes of course have to add up to the global number of rows of
-     * the matrix.
-     *
-     * In addition to this, PETSc also partitions the rectangular chunk of the
-     * matrix it owns (i.e. the @p local_rows times n() elements in the
-     * matrix), so that matrix vector multiplications can be performed
-     * efficiently. This column-partitioning therefore has to match the
-     * partitioning of the vectors with which the matrix is multiplied, just
-     * as the row-partitioning has to match the partitioning of destination
-     * vectors. This partitioning is passed to the constructors and reinit()
-     * functions through the @p local_columns variable, which again has to add
-     * up to the global number of columns in the matrix. The name @p
-     * local_columns may be somewhat misleading, since it does not mean that
-     * only these columns are stored locally; rather, these are the columns
-     * for which the elements of incoming vectors are stored locally.
-     *
-     * To make things even more complicated, PETSc needs a very good estimate
-     * of the number of elements to be stored in each row to be efficient.
-     * Otherwise it spends most of its time allocating small chunks of
-     * memory, a process that can slow programs down to a crawl if it happens
-     * too often. As if a good estimate of the number of entries per row were
-     * not enough, it even needs to split this estimate as follows: for each
-     * row it owns, it
-     * needs an estimate for the number of elements in this row that fall into
-     * the columns that are set apart for this process (see above), and the
-     * number of elements that are in the rest of the columns.
-     *
-     * Since in general this information is not readily available, most of the
-     * initializing functions of this class assume that all of the elements
-     * whose number you give as an argument to @p n_nonzero_per_row or via @p
-     * row_lengths fall into the columns "owned" by this process, and none
-     * into the other ones. This is a fair guess for most of the rows, since
-     * in a good domain partitioning, nodes only interact with nodes that are
-     * within the same subdomain. It does not hold for nodes on the interfaces
-     * of subdomains, however, and for the rows corresponding to these nodes,
-     * PETSc will have to allocate additional memory, a costly process.
-     *
-     * The only way to avoid this is to tell PETSc where the actual entries of
-     * the matrix will be. For this, there are constructors and reinit()
-     * functions of this class that take a DynamicSparsityPattern object
-     * containing all this information. While in the general case it is
-     * sufficient if the constructors and reinit() functions know the number
-     * of local rows and columns, the functions getting a sparsity pattern
-     * also need to know the number of local rows (@p local_rows_per_process)
-     * and columns (@p local_columns_per_process) for all other processes, in
-     * order to compute which parts of the matrix are which. Thus, it is not
-     * sufficient to just count the number of degrees of freedom that belong
-     * to a particular process, but you have to have the numbers for all
-     * processes available at all processes.
-     *
-     * @ingroup PETScWrappers
-     * @ingroup Matrix1
-     * @author Wolfgang Bangerth, 2004
-     */
-    class SparseMatrix : public MatrixBase
-    {
-    public:
-      /**
-       * Declare type for container size.
-       */
-      using size_type = types::global_dof_index;
-
-      /**
-       * A structure that describes some of the traits of this class in terms
-       * of its run-time behavior. Some other classes (such as the block
-       * matrix classes) that take one or other of the matrix classes as its
-       * template parameters can tune their behavior based on the variables in
-       * this class.
-       */
-      struct Traits
-      {
-        /**
-         * It is not safe to elide additions of zeros to individual elements
-         * of this matrix. The reason is that additions to the matrix may
-         * trigger collective operations synchronizing buffers on multiple
-         * processes. If an addition is elided on one process, this may lead
-         * to other processes hanging in an infinite waiting loop.
-         */
-        static const bool zero_addition_can_be_elided = false;
-      };
-
-      /**
-       * Default constructor. Create an empty matrix.
-       */
-      SparseMatrix();
-
-      /**
-       * Destructor to free the PETSc object.
-       */
-      ~SparseMatrix() override;
-
-      /**
-       * Create a sparse matrix of dimensions @p m times @p n, with an initial
-       * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
-       * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
-       * function for more information about these parameters). PETSc is able
-       * to cope with the situation that more than this number of elements are
-       * later allocated for a row, but this involves copying data, and is
-       * thus expensive.
-       *
-       * For the meaning of the @p local_row and @p local_columns parameters,
-       * see the class documentation.
-       *
-       * The @p is_symmetric flag determines whether we should tell PETSc that
-       * the matrix is going to be symmetric (as indicated by the call
-       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
-       * documentation states that one cannot form an ILU decomposition of a
-       * matrix for which this flag has been set to @p true, only an ICC. The
-       * default value of this flag is @p false.
-       *
-       * @deprecated This constructor is deprecated: please use the
-       * constructor with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      SparseMatrix(const MPI_Comm &communicator,
-                   const size_type m,
-                   const size_type n,
-                   const size_type local_rows,
-                   const size_type local_columns,
-                   const size_type n_nonzero_per_row,
-                   const bool      is_symmetric              = false,
-                   const size_type n_offdiag_nonzero_per_row = 0);
-
-      /**
-       * Initialize a rectangular matrix with @p m rows and @p n columns. The
-       * maximal number of nonzero entries for diagonal and off-diagonal
-       * blocks of each row is given by the @p row_lengths and @p
-       * offdiag_row_lengths arrays.
-       *
-       * For the meaning of the @p local_row and @p local_columns parameters,
-       * see the class documentation.
-       *
-       * Just as for the other constructors: PETSc is able to cope with the
-       * situation that more than this number of elements are later allocated
-       * for a row, but this involves copying data, and is thus expensive.
-       *
-       * The @p is_symmetric flag determines whether we should tell PETSc that
-       * the matrix is going to be symmetric (as indicated by the call
-       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
-       * documentation states that one cannot form an ILU decomposition of a
-       * matrix for which this flag has been set to @p true, only an ICC. The
-       * default value of this flag is @p false.
-       *
-       * @deprecated This constructor is deprecated: please use the
-       * constructor with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      SparseMatrix(const MPI_Comm &              communicator,
-                   const size_type               m,
-                   const size_type               n,
-                   const size_type               local_rows,
-                   const size_type               local_columns,
-                   const std::vector<size_type> &row_lengths,
-                   const bool                    is_symmetric = false,
-                   const std::vector<size_type> &offdiag_row_lengths =
-                     std::vector<size_type>());
-
-      /**
-       * Initialize using the given sparsity pattern with communication
-       * happening over the provided @p communicator.
-       *
-       * For the meaning of the @p local_rows_per_process and @p
-       * local_columns_per_process parameters, see the class documentation.
-       *
-       * Note that PETSc can be very slow if you do not provide it with a good
-       * estimate of the lengths of rows. Using the present function is a very
-       * efficient way to do this, as it uses the exact number of nonzero
-       * entries for each row of the matrix by using the given sparsity
-       * pattern argument. If the @p preset_nonzero_locations flag is @p true,
-       * this function not only sets the correct row sizes up
-       * front, but also pre-allocates the correct nonzero entries in the
-       * matrix.
-       *
-       * PETSc allows one to later add additional nonzero entries to a matrix by
-       * simply writing to these elements. However, this will then lead to
-       * additional memory allocations which are very inefficient and will
-       * greatly slow down your program. It is therefore significantly more
-       * efficient to get memory allocation right from the start.
-       */
-      template <typename SparsityPatternType>
-      SparseMatrix(const MPI_Comm &              communicator,
-                   const SparsityPatternType &   sparsity_pattern,
-                   const std::vector<size_type> &local_rows_per_process,
-                   const std::vector<size_type> &local_columns_per_process,
-                   const unsigned int            this_process,
-                   const bool preset_nonzero_locations = true);
-
-      /**
-       * This operator assigns a scalar to a matrix. Since this usually does
-       * not make much sense (should we set all matrix entries to this value?
-       * Only the nonzero entries of the sparsity pattern?), this operation is
-       * only allowed if the actual value to be assigned is zero. This
-       * operator only exists to allow for the obvious notation
-       * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
-       * keep the sparsity pattern previously used.
-       */
-      SparseMatrix &
-      operator=(const value_type d);
-
-
-      /**
-       * Make a copy of the PETSc matrix @p other. It is assumed that both
-       * matrices have the same SparsityPattern.
-       */
-      void
-      copy_from(const SparseMatrix &other);
-
-      /**
-       * Throw away the present matrix and generate one that has the same
-       * properties as if it were created by the constructor of this class
-       * with the same argument list as the present function.
-       *
-       * @deprecated This overload of <code>reinit</code> is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      reinit(const MPI_Comm &communicator,
-             const size_type m,
-             const size_type n,
-             const size_type local_rows,
-             const size_type local_columns,
-             const size_type n_nonzero_per_row,
-             const bool      is_symmetric              = false,
-             const size_type n_offdiag_nonzero_per_row = 0);
-
-      /**
-       * Throw away the present matrix and generate one that has the same
-       * properties as if it were created by the constructor of this class
-       * with the same argument list as the present function.
-       *
-       * @deprecated This overload of <code>reinit</code> is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      reinit(const MPI_Comm &              communicator,
-             const size_type               m,
-             const size_type               n,
-             const size_type               local_rows,
-             const size_type               local_columns,
-             const std::vector<size_type> &row_lengths,
-             const bool                    is_symmetric = false,
-             const std::vector<size_type> &offdiag_row_lengths =
-               std::vector<size_type>());
-
-      /**
-       * Initialize using the given sparsity pattern with communication
-       * happening over the provided @p communicator.
-       *
-       * Note that PETSc can be very slow if you do not provide it with a good
-       * estimate of the lengths of rows. Using the present function is a very
-       * efficient way to do this, as it uses the exact number of nonzero
-       * entries for each row of the matrix by using the given sparsity
-       * pattern argument. If the @p preset_nonzero_locations flag is @p true,
-       * this function not only sets the correct row sizes up
-       * front, but also pre-allocates the correct nonzero entries in the
-       * matrix.
-       *
-       * PETSc allows one to later add additional nonzero entries to a matrix by
-       * simply writing to these elements. However, this will then lead to
-       * additional memory allocations which are very inefficient and will
-       * greatly slow down your program. It is therefore significantly more
-       * efficient to get memory allocation right from the start.
-       */
-      template <typename SparsityPatternType>
-      void
-      reinit(const MPI_Comm &              communicator,
-             const SparsityPatternType &   sparsity_pattern,
-             const std::vector<size_type> &local_rows_per_process,
-             const std::vector<size_type> &local_columns_per_process,
-             const unsigned int            this_process,
-             const bool                    preset_nonzero_locations = true);
-
-      /**
-       * Create a matrix where the size() of the IndexSets determines the
-       * global number of rows and columns and the entries of the IndexSets
-       * give the rows and columns for the calling processor. Note that only
-       * ascending, 1:1 IndexSets are supported.
-       */
-      template <typename SparsityPatternType>
-      void
-      reinit(const IndexSet &           local_rows,
-             const IndexSet &           local_columns,
-             const SparsityPatternType &sparsity_pattern,
-             const MPI_Comm &           communicator);
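A condensed sketch of this IndexSet-based reinit(), assuming a distributed DoFHandler `dof_handler` and a communicator `mpi_communicator`; a production code would typically also distribute the sparsity pattern before using it:
@code
#include <deal.II/lac/petsc_sparse_matrix.h>

const IndexSet locally_owned = dof_handler.locally_owned_dofs();

DynamicSparsityPattern dsp(dof_handler.n_dofs());
DoFTools::make_sparsity_pattern(dof_handler, dsp);

PETScWrappers::MPI::SparseMatrix system_matrix;
system_matrix.reinit(locally_owned, locally_owned, dsp, mpi_communicator);
@endcode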
-
-      /**
-       * Initialize this matrix to have the same structure as @p other. This
-       * will not copy the values of the other matrix, but you can use
-       * copy_from() for this.
-       */
-      void
-      reinit(const SparseMatrix &other);
-
-      /**
-       * Return a reference to the MPI communicator object in use with this
-       * matrix.
-       */
-      virtual const MPI_Comm &
-      get_mpi_communicator() const override;
-
-      /**
-       * @addtogroup Exceptions
-       * @{
-       */
-      /**
-       * Exception
-       */
-      DeclException2(ExcLocalRowsTooLarge,
-                     int,
-                     int,
-                     << "The number of local rows " << arg1
-                     << " must not be larger than the total number of rows "
-                     << arg2);
-      //@}
-
-      /**
-       * Return the square of the norm of the vector $v$ with respect to the
-       * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is
-       * useful, e.g. in the finite element context, where the $L_2$ norm of a
-       * function equals the matrix norm with respect to the mass matrix of
-       * the vector representing the nodal values of the finite element
-       * function.
-       *
-       * Obviously, the matrix needs to be quadratic for this operation.
-       *
-       * The implementation of this function is not as efficient as the one in
-       * the @p MatrixBase class used in deal.II (i.e. the original one, not
-       * the PETSc wrapper class) since PETSc doesn't support this operation
-       * and needs a temporary vector.
-       */
-      PetscScalar
-      matrix_norm_square(const Vector &v) const;
-
-      /**
-       * Compute the matrix scalar product $\left(u^\ast,Mv\right)$.
-       *
-       * The implementation of this function is not as efficient as the one in
-       * the @p MatrixBase class used in deal.II (i.e. the original one, not
-       * the PETSc wrapper class) since PETSc doesn't support this operation
-       * and needs a temporary vector.
-       */
-      PetscScalar
-      matrix_scalar_product(const Vector &u, const Vector &v) const;
-
-      /**
-       * Return the partitioning of the domain space of this matrix, i.e., the
-       * partitioning of the vectors this matrix has to be multiplied with.
-       */
-      IndexSet
-      locally_owned_domain_indices() const;
-
-      /**
-       * Return the partitioning of the range space of this matrix, i.e., the
-       * partitioning of the vectors that result from matrix-vector
-       * products.
-       */
-      IndexSet
-      locally_owned_range_indices() const;
-
-      /**
-       * Perform the matrix-matrix multiplication $C = AB$, or,
-       * $C = A \text{diag}(V) B$ given a compatible vector $V$.
-       *
-       * This function calls MatrixBase::mmult() to do the actual work.
-       */
-      void
-      mmult(SparseMatrix &      C,
-            const SparseMatrix &B,
-            const MPI::Vector & V = MPI::Vector()) const;
-
-      /**
-       * Perform the matrix-matrix multiplication with the transpose of
-       * <tt>this</tt>, i.e., $C = A^T B$, or,
-       * $C = A^T \text{diag}(V) B$ given a compatible vector $V$.
-       *
-       * This function calls MatrixBase::Tmmult() to do the actual work.
-       */
-      void
-      Tmmult(SparseMatrix &      C,
-             const SparseMatrix &B,
-             const MPI::Vector & V = MPI::Vector()) const;
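A short sketch of the two products declared above, assuming `A` and `B` are compatible SparseMatrix objects and `V` a compatible MPI::Vector:
@code
PETScWrappers::MPI::SparseMatrix C;
A.mmult(C, B);      // C = A * B
A.Tmmult(C, B, V);  // C = A^T * diag(V) * B
@endcode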
-
-    private:
-      /**
-       * Copy of the communicator object to be used for this parallel matrix.
-       */
-      MPI_Comm communicator;
-
-      /**
-       * Do the actual work for the respective reinit() function and the
-       * matching constructor, i.e. create a matrix. Getting rid of the
-       * previous matrix is left to the caller.
-       *
-       * @deprecated This overload of <code>do_reinit</code> is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      do_reinit(const size_type m,
-                const size_type n,
-                const size_type local_rows,
-                const size_type local_columns,
-                const size_type n_nonzero_per_row,
-                const bool      is_symmetric              = false,
-                const size_type n_offdiag_nonzero_per_row = 0);
-
-      /**
-       * Same as previous function.
-       *
-       * @deprecated This overload of <code>do_reinit</code> is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      do_reinit(const size_type               m,
-                const size_type               n,
-                const size_type               local_rows,
-                const size_type               local_columns,
-                const std::vector<size_type> &row_lengths,
-                const bool                    is_symmetric = false,
-                const std::vector<size_type> &offdiag_row_lengths =
-                  std::vector<size_type>());
-
-      /**
-       * Same as previous functions.
-       */
-      template <typename SparsityPatternType>
-      void
-      do_reinit(const SparsityPatternType &   sparsity_pattern,
-                const std::vector<size_type> &local_rows_per_process,
-                const std::vector<size_type> &local_columns_per_process,
-                const unsigned int            this_process,
-                const bool                    preset_nonzero_locations);
-
-      /**
-       * Same as previous functions.
-       */
-      template <typename SparsityPatternType>
-      void
-      do_reinit(const IndexSet &           local_rows,
-                const IndexSet &           local_columns,
-                const SparsityPatternType &sparsity_pattern);
-
-      /**
-       * To allow calling protected prepare_add() and prepare_set().
-       */
-      friend class BlockMatrixBase<SparseMatrix>;
-    };
-
-
-
-    // -------- template and inline functions ----------
-
-    inline const MPI_Comm &
-    SparseMatrix::get_mpi_communicator() const
-    {
-      return communicator;
-    }
-  } // namespace MPI
-} // namespace PETScWrappers
-
-DEAL_II_NAMESPACE_CLOSE
-
-#  endif // DEAL_II_WITH_PETSC
+#  pragma DEAL_II_WARNING( \
+    "This file is deprecated. Use deal.II/lac/petsc_sparse_matrix.h instead.")
 
 #endif
 /*---------------------- petsc_parallel_sparse_matrix.h ---------------------*/
index 12c4bc2cd615f09a2fe441f1bf3aec6bdd771e17..a238651932c1acd2b52a2ac125aefcf511b72ff7 100644 (file)
 #ifndef dealii_petsc_parallel_vector_h
 #  define dealii_petsc_parallel_vector_h
 
-
 #  include <deal.II/base/config.h>
 
-#  ifdef DEAL_II_WITH_PETSC
-
-#    include <deal.II/base/index_set.h>
-#    include <deal.II/base/subscriptor.h>
-
-#    include <deal.II/lac/exceptions.h>
-#    include <deal.II/lac/petsc_vector_base.h>
-#    include <deal.II/lac/vector.h>
-#    include <deal.II/lac/vector_operation.h>
-#    include <deal.II/lac/vector_type_traits.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-/*! @addtogroup PETScWrappers
- *@{
- */
-namespace PETScWrappers
-{
-  /**
-   * Namespace for PETSc classes that work in parallel over MPI, such as
-   * distributed vectors and matrices.
-   *
-   * @ingroup PETScWrappers
-   * @author Wolfgang Bangerth, 2004
-   */
-  namespace MPI
-  {
-    /**
-     * Implementation of a parallel vector class based on PETSc and using MPI
-     * communication to synchronize distributed operations. All the
-     * functionality is actually in the base class, except for the calls to
-     * generate a parallel vector. This is possible since PETSc only works on
-     * an abstract vector type and internally dispatches to functions that do
-     * the actual work depending on the actual vector type (much like using
-     * virtual functions). Only the functions creating a vector of specific
-     * type differ, and are implemented in this particular class.
-     *
-     *
-     * <h3>Parallel communication model</h3>
-     *
-     * The parallel functionality of PETSc is built on top of the Message
-     * Passing Interface (MPI). MPI's communication model is built on
-     * collective communications: if one process wants something from another,
-     * that other process has to be willing to accept this communication. A
-     * process cannot query data from another process by calling a remote
-     * function, without that other process expecting such a transaction. The
-     * consequence is that most of the operations in the base class of this
-     * class have to be called collectively. For example, if you want to
-     * compute the l2 norm of a parallel vector, @em all processes across
-     * which this vector is shared have to call the @p l2_norm function. If
-     * you don't do this, but instead only call the @p l2_norm function on one
-     * process, then the following happens: This one process will call one of
-     * the collective MPI functions and wait for all the other processes to
-     * join in on this. Since the other processes don't call this function,
-     * you will either get a time-out on the first process, or, worse, by the
-     * time the next call to a PETSc function generates an MPI message on
-     * the other processes, you will get a cryptic message that only a subset
-     * of processes attempted a communication. These bugs can be very hard to
-     * figure out, unless you are well-acquainted with the communication model
-     * of MPI, and know which functions may generate MPI messages.
-     *
-     * One particular case, where an MPI message may be generated unexpectedly
-     * is discussed below.
-     *
-     *
-     * <h3>Accessing individual elements of a vector</h3>
-     *
-     * PETSc does allow read access to individual elements of a vector, but in
-     * the distributed case only to elements that are stored locally. We
-     * implement this through calls like <tt>d=vec(i)</tt>. However, if you
-     * access an element outside the locally stored range, an exception is
-     * generated.
-     *
-     * In contrast to read access, PETSc (and the respective deal.II wrapper
-     * classes) allow writing (or adding) to individual elements of vectors,
-     * even if they are stored on a different process. You can do this by
-     * writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or
-     * similar operations. There is one catch, however, that may lead to very
-     * confusing error messages: PETSc requires application programs to call
-     * the compress() function when they switch from adding to elements to
-     * writing to elements. The reasoning is that all processes might
-     * accumulate addition operations to elements, even if multiple processes
-     * write to the same elements. By the time we call compress() the next
-     * time, all these additions are executed. However, if one process adds to
-     * an element, and another overwrites it, the order of execution would
-     * yield non-deterministic behavior if we don't make sure that a
-     * synchronization with compress() happens in between.
-     *
-     * In order to make sure these calls to compress() happen at the
-     * appropriate time, the deal.II wrappers keep a state variable that
-     * stores which operation is presently allowed: additions or writes. If it
-     * encounters an operation of the opposite kind, it calls compress() and
-     * flips the state. This can sometimes lead to very confusing behavior, in
-     * code that may for example look like this:
-     * @code
-     *   PETScWrappers::MPI::Vector vector;
-     *   ...
-     *   // do some write operations on the vector
-     *   for (unsigned int i=0; i<vector.size(); ++i)
-     *     vector(i) = i;
-     *
-     *   // do some additions to vector elements, but only for some elements
-     *   for (unsigned int i=0; i<vector.size(); ++i)
-     *     if (some_condition(i) == true)
-     *       vector(i) += 1;
-     *
-     *   // do another collective operation
-     *   const double norm = vector.l2_norm();
-     * @endcode
-     *
-     * This code can run into trouble: by the time we see the first addition
-     * operation, we need to flush the overwrite buffers for the vector, and
-     * the deal.II library will do so by calling compress(). However, it will
-     * only do so on those processes that actually perform an addition -- if
-     * the condition is never true for one of the processes, then this one
-     * will not get to the actual compress() call, whereas all the other ones
-     * do. This gets us into trouble, since all the other processes hang in
-     * the call that flushes the write buffers, while the one remaining process advances
-     * to the call to compute the l2 norm. At this time, you will get an error
-     * that some operation was attempted by only a subset of processes. This
-     * behavior may seem surprising, unless you know that write/addition
-     * operations on single elements may trigger this behavior.
-     *
-     * The problem described here may be avoided by placing additional calls
-     * to compress(), or by making sure that all processes do the same type of
-     * operations at the same time, for example by adding zero where
-     * necessary (a short sketch follows this comment block).
-     *
-     * @see
-     * @ref GlossGhostedVector "vectors with ghost elements"
-     *
-     * @ingroup PETScWrappers
-     * @ingroup Vectors
-     * @author Wolfgang Bangerth, 2004
-     */
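A minimal sketch of the remedy mentioned at the end of the comment above: make the phase switches explicit with compress(), so that every process participates in them whether or not it added anything (some_condition() is the placeholder used in the example above):
@code
PETScWrappers::MPI::Vector vector;
// ... initialize the vector ...

for (unsigned int i = 0; i < vector.size(); ++i)
  vector(i) = i;
vector.compress(VectorOperation::insert); // collective: end of the write phase

for (unsigned int i = 0; i < vector.size(); ++i)
  if (some_condition(i) == true)
    vector(i) += 1;
vector.compress(VectorOperation::add);    // collective: end of the addition phase

const double norm = vector.l2_norm();     // now safe on all processes
@endcode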
-    class Vector : public VectorBase
-    {
-    public:
-      /**
-       * Declare type for container size.
-       */
-      using size_type = types::global_dof_index;
-
-      /**
-       * Default constructor. Initialize the vector as empty.
-       */
-      Vector();
-
-      /**
-       * Constructor. Set dimension to @p n and initialize all elements with
-       * zero.
-       *
-       * @arg local_size denotes the size of the chunk that shall be stored on
-       * the present process.
-       *
-       * @arg communicator denotes the MPI communicator over which the
-       * different parts of the vector shall communicate
-       *
-       * The constructor is made explicit to avoid accidents like this:
-       * <tt>v=0;</tt>. Presumably, the user wants to set every element of the
-       * vector to zero, but instead, what happens is this call:
-       * <tt>v=Vector@<number@>(0);</tt>, i.e. the vector is replaced by one
-       * of length zero.
-       */
-      explicit Vector(const MPI_Comm &communicator,
-                      const size_type n,
-                      const size_type local_size);
-
-
-      /**
-       * Copy-constructor from deal.II vectors. Sets the dimension to that of
-       * the given vector, and copies all elements.
-       *
-       * @arg local_size denotes the size of the chunk that shall be stored on
-       * the present process.
-       *
-       * @arg communicator denotes the MPI communicator over which the
-       * different parts of the vector shall communicate
-       */
-      template <typename Number>
-      explicit Vector(const MPI_Comm &              communicator,
-                      const dealii::Vector<Number> &v,
-                      const size_type               local_size);
-
-
-      /**
-       * Copy constructor that copies the values from a PETSc wrapper vector class.
-       *
-       * @arg local_size denotes the size of the chunk that shall be stored on
-       * the present process.
-       *
-       * @arg communicator denotes the MPI communicator over which the
-       * different parts of the vector shall communicate
-       *
-       * @deprecated The use of objects that are explicitly of type VectorBase
-       * is deprecated: use PETScWrappers::MPI::Vector instead.
-       */
-      DEAL_II_DEPRECATED
-      explicit Vector(const MPI_Comm &  communicator,
-                      const VectorBase &v,
-                      const size_type   local_size);
-
-      /**
-       * Construct a new parallel ghosted PETSc vector from IndexSets.
-       *
-       * Note that @p local must be ascending and 1:1, see
-       * IndexSet::is_ascending_and_one_to_one().  In particular, the DoFs in
-       * @p local need to be contiguous, meaning you can only create vectors
-       * from a DoFHandler with several finite element components if they are
-       * not reordered by component (use a PETScWrappers::BlockVector
-       * otherwise).  The global size of the vector is determined by
-       * local.size(). The global indices in @p ghost are supplied as ghost
-       * indices so that they can be read locally.
-       *
-       * Note that the @p ghost IndexSet may be empty and that any indices
-       * already contained in @p local are ignored during construction. That
-       * way, the ghost parameter can equal the set of locally relevant
-       * degrees of freedom, see step-32.
-       *
-       * @note This operation always creates a ghosted vector, which is considered
-       * read-only.
-       *
-       * @see
-       * @ref GlossGhostedVector "vectors with ghost elements"
-       */
-      Vector(const IndexSet &local,
-             const IndexSet &ghost,
-             const MPI_Comm &communicator);
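A minimal sketch of creating such a ghosted vector, assuming a distributed DoFHandler `dof_handler` and a communicator `mpi_communicator`:
@code
#include <deal.II/lac/petsc_vector.h>

const IndexSet locally_owned = dof_handler.locally_owned_dofs();
IndexSet       locally_relevant;
DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant);

// read-only vector that also stores the ghost (locally relevant) entries
PETScWrappers::MPI::Vector ghosted(locally_owned, locally_relevant, mpi_communicator);
@endcode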
-
-      /**
-       * Construct a new parallel PETSc vector without ghost elements from an
-       * IndexSet.
-       *
-       * Note that @p local must be ascending and 1:1, see
-       * IndexSet::is_ascending_and_one_to_one().  In particular, the DoFs in
-       * @p local need to be contiguous, meaning you can only create vectors
-       * from a DoFHandler with several finite element components if they are
-       * not reordered by component (use a PETScWrappers::BlockVector
-       * otherwise).
-       */
-      explicit Vector(const IndexSet &local, const MPI_Comm &communicator);
-
-      /**
-       * Release all memory and return to a state just like after having
-       * called the default constructor.
-       */
-      virtual void
-      clear() override;
-
-      /**
-       * Copy the given vector. Resize the present vector if necessary. Also
-       * take over the MPI communicator of @p v.
-       */
-      Vector &
-      operator=(const Vector &v);
-
-      /**
-       * Set all components of the vector to the given number @p s. Simply
-       * pass this down to the base class, but we still need to declare this
-       * function to make the example given in the discussion about making the
-       * constructor explicit work.
-       */
-      Vector &
-      operator=(const PetscScalar s);
-
-      /**
-       * Copy the values of a deal.II vector (as opposed to those of the PETSc
-       * vector wrapper class) into this object.
-       *
-       * Contrary to the case of sequential vectors, this operator requires
-       * that the present vector already has the correct size, since we need
-       * to have a partition and a communicator present which we otherwise
-       * can't get from the source vector.
-       */
-      template <typename number>
-      Vector &
-      operator=(const dealii::Vector<number> &v);
-
-      /**
-       * Change the dimension of the vector to @p N. It is unspecified how
-       * resizing the vector affects the memory allocation of this object;
-       * i.e., it is not guaranteed that resizing it to a smaller size
-       * actually also reduces memory consumption, or if for efficiency the
-       * same amount of memory is used for less data.
-       *
-       * @p local_size denotes how many of the @p N values shall be stored
-       * locally on the present process.
-       *
-       * @p communicator denotes the MPI communicator henceforth to be used
-       * for this vector.
-       *
-       * If @p omit_zeroing_entries is false, the vector is filled by zeros.
-       * Otherwise, the elements are left an unspecified state.
-       */
-      void
-      reinit(const MPI_Comm &communicator,
-             const size_type N,
-             const size_type local_size,
-             const bool      omit_zeroing_entries = false);
-
-      /**
-       * Change the dimension to that of the vector @p v, and also take over
-       * the partitioning into local sizes as well as the MPI communicator.
-       * The same applies as for the other @p reinit function.
-       *
-       * The elements of @p v are not copied, i.e. this function is the same
-       * as calling <tt>reinit(v.size(), v.local_size(),
-       * omit_zeroing_entries)</tt>.
-       */
-      void
-      reinit(const Vector &v, const bool omit_zeroing_entries = false);
-
-      /**
-       * Reinit as a vector with ghost elements. See the constructor with
-       * same signature for more details.
-       *
-       * @see
-       * @ref GlossGhostedVector "vectors with ghost elements"
-       */
-      void
-      reinit(const IndexSet &local,
-             const IndexSet &ghost,
-             const MPI_Comm &communicator);
-
-      /**
-       * Reinit as a vector without ghost elements. See constructor with same
-       * signature for more details.
-       *
-       * @see
-       * @ref GlossGhostedVector "vectors with ghost elements"
-       */
-      void
-      reinit(const IndexSet &local, const MPI_Comm &communicator);
-
-      /**
-       * Return a reference to the MPI communicator object in use with this
-       * vector.
-       */
-      const MPI_Comm &
-      get_mpi_communicator() const override;
-
-      /**
-       * Print to a stream. @p precision denotes the desired precision with
-       * which values shall be printed, @p scientific whether scientific
-       * notation shall be used. If @p across is @p true then the vector is
-       * printed in a line, while if @p false then the elements are printed on
-       * a separate line each.
-       *
-       * @note This function overloads the one in the base class to ensure
-       * that the right thing happens for parallel vectors that are
-       * distributed across processors.
-       */
-      void
-      print(std::ostream &     out,
-            const unsigned int precision  = 3,
-            const bool         scientific = true,
-            const bool         across     = true) const;
-
-      /**
-       * @copydoc PETScWrappers::VectorBase::all_zero()
-       *
-       * @note This function overloads the one in the base class to make this
-       * a collective operation.
-       */
-      bool
-      all_zero() const;
-
-    protected:
-      /**
-       * Create a vector of length @p n. For this class, we create a parallel
-       * vector. @p n denotes the total size of the vector to be created. @p
-       * local_size denotes how many of these elements shall be stored
-       * locally.
-       */
-      virtual void
-      create_vector(const size_type n, const size_type local_size);
-
-
-
-      /**
-       * Create a vector of global length @p n, local size @p local_size and
-       * with the specified ghost indices. Note that you need to call
-       * update_ghost_values() before accessing those.
-       */
-      virtual void
-      create_vector(const size_type n,
-                    const size_type local_size,
-                    const IndexSet &ghostnodes);
-
-
-    private:
-      /**
-       * Copy of the communicator object to be used for this parallel vector.
-       */
-      MPI_Comm communicator;
-    };
-
-
-    // ------------------ template and inline functions -------------
-
-
-    /**
-     * Global function @p swap which overloads the default implementation of
-     * the C++ standard library which uses a temporary object. The function
-     * simply exchanges the data of the two vectors.
-     *
-     * @relatesalso PETScWrappers::MPI::Vector
-     * @author Wolfgang Bangerth, 2004
-     */
-    inline void
-    swap(Vector &u, Vector &v)
-    {
-      u.swap(v);
-    }
-
-
-#    ifndef DOXYGEN
-
-    template <typename number>
-    Vector::Vector(const MPI_Comm &              communicator,
-                   const dealii::Vector<number> &v,
-                   const size_type               local_size)
-      : communicator(communicator)
-    {
-      Vector::create_vector(v.size(), local_size);
-
-      *this = v;
-    }
-
-
-
-    inline Vector &
-    Vector::operator=(const PetscScalar s)
-    {
-      VectorBase::operator=(s);
-
-      return *this;
-    }
-
-
-
-    template <typename number>
-    inline Vector &
-    Vector::operator=(const dealii::Vector<number> &v)
-    {
-      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
-
-      // FIXME: the following isn't necessarily fast, but this is due to
-      // the fact that PETSc doesn't offer an inlined access operator.
-      //
-      // if someone wants to contribute some code: to make this code
-      // faster, one could either first convert all values to PetscScalar,
-      // and then set them all at once using VecSetValues. This has the
-      // drawback that it could take quite some memory, if the vector is
-      // large, and it would in addition allocate memory on the heap, which
-      // is expensive. an alternative would be to split the vector into
-      // chunks of, say, 128 elements, convert a chunk at a time and set it
-      // in the output vector using VecSetValues. since 128 elements is
-      // small enough, this could easily be allocated on the stack (as a
-      // local variable) which would make the whole thing much more
-      // efficient.
-      //
-      // a second way to make things faster is for the special case that
-      // number==PetscScalar. we could then declare a specialization of
-      // this template, and omit the conversion. the problem with this is
-      // that the best we can do is to use VecSetValues, but this isn't
-      // very efficient either: it wants to see an array of indices, which
-      // in this case a) again takes up a whole lot of memory on the heap,
-      // and b) is totally dumb since its content would simply be the
-      // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
-      // function in Petsc that would take a pointer to an array of
-      // PetscScalar values and simply copy n elements verbatim into the
-      // vector...
-      for (size_type i = 0; i < v.size(); ++i)
-        (*this)(i) = v(i);
-
-      compress(::dealii::VectorOperation::insert);
-
-      return *this;
-    }
-
-
-
-    inline const MPI_Comm &
-    Vector::get_mpi_communicator() const
-    {
-      return communicator;
-    }
-
-#    endif // DOXYGEN
-  }        // namespace MPI
-} // namespace PETScWrappers
-
-namespace internal
-{
-  namespace LinearOperatorImplementation
-  {
-    template <typename>
-    class ReinitHelper;
-
-    /**
-     * A helper class used internally in linear_operator.h. Specialization for
-     * PETScWrappers::MPI::Vector.
-     */
-    template <>
-    class ReinitHelper<PETScWrappers::MPI::Vector>
-    {
-    public:
-      template <typename Matrix>
-      static void
-      reinit_range_vector(const Matrix &              matrix,
-                          PETScWrappers::MPI::Vector &v,
-                          bool /*omit_zeroing_entries*/)
-      {
-        v.reinit(matrix.locally_owned_range_indices(),
-                 matrix.get_mpi_communicator());
-      }
-
-      template <typename Matrix>
-      static void
-      reinit_domain_vector(const Matrix &              matrix,
-                           PETScWrappers::MPI::Vector &v,
-                           bool /*omit_zeroing_entries*/)
-      {
-        v.reinit(matrix.locally_owned_domain_indices(),
-                 matrix.get_mpi_communicator());
-      }
-    };
-
-  } // namespace LinearOperatorImplementation
-} /* namespace internal */
-
-/**@}*/
-
-
-/**
- * Declare dealii::PETScWrappers::MPI::Vector as distributed vector.
- *
- * @author Uwe Koecher, 2017
- */
-template <>
-struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
-{};
-
-
-DEAL_II_NAMESPACE_CLOSE
+#  include <deal.II/lac/petsc_vector.h>
 
-#  endif // DEAL_II_WITH_PETSC
+#  pragma DEAL_II_WARNING( \
+    "This file is deprecated. Use deal.II/lac/petsc_vector.h instead!")
 
 #endif
 /*------------------------- petsc_parallel_vector.h -------------------------*/
index f192d1d3fedd242aa67da09cfb87bfd3d3ca32f5..a36670fbcbf8cf405bed4fbebe8e6b8a5716af2b 100644 (file)
@@ -23,7 +23,7 @@
 
 #    include <deal.II/lac/exceptions.h>
 #    include <deal.II/lac/petsc_matrix_base.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 
 #    include <vector>
 
@@ -284,6 +284,493 @@ namespace PETScWrappers
      */
     friend class BlockMatrixBase<SparseMatrix>;
   };
+
+  namespace MPI
+  {
+    /**
+     * Implementation of a parallel sparse matrix class based on PETSc, with
+     * rows of the matrix distributed across an MPI network. All the
+     * functionality is actually in the base class, except for the calls to
+     * generate a parallel sparse matrix. This is possible since PETSc only
+     * works on an abstract matrix type and internally distributes to
+     * functions that do the actual work depending on the actual matrix type
+     * (much like using virtual functions). Only the functions creating a
+     * matrix of specific type differ, and are implemented in this particular
+     * class.
+     *
+     * There are a number of comments on the communication model as well as
+     * access to individual elements in the documentation to the parallel
+     * vector class. These comments apply here as well.
+     *
+     *
+     * <h3>Partitioning of matrices</h3>
+     *
+     * PETSc partitions parallel matrices so that each MPI process "owns" a
+     * certain number of rows (i.e. only this process stores the respective
+     * entries in these rows). The number of rows each process owns has to be
+     * passed to the constructors and reinit() functions via the argument @p
+     * local_rows. The individual values passed as @p local_rows on all the
+     * MPI processes of course have to add up to the global number of rows of
+     * the matrix.
+     *
+     * In addition to this, PETSc also partitions the rectangular chunk of the
+     * matrix it owns (i.e. the @p local_rows times n() elements in the
+     * matrix), so that matrix vector multiplications can be performed
+     * efficiently. This column-partitioning therefore has to match the
+     * partitioning of the vectors with which the matrix is multiplied, just
+     * as the row-partitioning has to match the partitioning of destination
+     * vectors. This partitioning is passed to the constructors and reinit()
+     * functions through the @p local_columns variable, which again has to add
+     * up to the global number of columns in the matrix. The name @p
+     * local_columns is somewhat misleading, since it does not mean that only
+     * these columns are stored locally; rather, these are the columns for
+     * which the elements of incoming vectors are stored locally.
+     *
+     * To make things even more complicated, PETSc needs a very good estimate
+     * of the number of elements to be stored in each row to be efficient.
+     * Otherwise it spends most of the time allocating small chunks of
+     * memory, a process that can slow down programs to a crawl if it happens
+     * too often. As if a good estimate of the number of entries per row
+     * weren't enough, it even needs to split this estimate as follows: for
+     * each row it owns, it needs an estimate for the number of elements in
+     * this row that fall into the columns that are set apart for this
+     * process (see above), and the number of elements that are in the rest
+     * of the columns.
+     *
+     * Since in general this information is not readily available, most of the
+     * initializing functions of this class assume that all of the elements
+     * whose number you give as an argument to @p n_nonzero_per_row or via @p
+     * row_lengths fall into the columns "owned" by this process, and none
+     * into the other ones. This is a fair guess for most of the rows, since
+     * in a good domain partitioning, nodes only interact with nodes that are
+     * within the same subdomain. It does not hold for nodes on the interfaces
+     * of subdomains, however, and for the rows corresponding to these nodes,
+     * PETSc will have to allocate additional memory, a costly process.
+     *
+     * The only way to avoid this is to tell PETSc where the actual entries of
+     * the matrix will be. For this, there are constructors and reinit()
+     * functions of this class that take a DynamicSparsityPattern object
+     * containing all this information. While in the general case it is
+     * sufficient if the constructors and reinit() functions know the number
+     * of local rows and columns, the functions getting a sparsity pattern
+     * also need to know the number of local rows (@p local_rows_per_process)
+     * and columns (@p local_columns_per_process) for all other processes, in
+     * order to compute which parts of the matrix are which. Thus, it is not
+     * sufficient to just count the number of degrees of freedom that belong
+     * to a particular process, but you have to have the numbers for all
+     * processes available at all processes.
+     *
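+     * As a minimal illustrative sketch (the two IndexSet objects, the
+     * DynamicSparsityPattern @p dsp, and @p mpi_communicator are placeholder
+     * names assumed to have been set up elsewhere), such a matrix could be
+     * initialized through the reinit() overload taking IndexSet arguments:
+     * @code
+     *   PETScWrappers::MPI::SparseMatrix matrix;
+     *   matrix.reinit(locally_owned_rows,
+     *                 locally_owned_columns,
+     *                 dsp,
+     *                 mpi_communicator);
+     * @endcode
+     *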
+     * @ingroup PETScWrappers
+     * @ingroup Matrix1
+     * @author Wolfgang Bangerth, 2004
+     */
+    class SparseMatrix : public MatrixBase
+    {
+    public:
+      /**
+       * Declare type for container size.
+       */
+      using size_type = types::global_dof_index;
+
+      /**
+       * A structure that describes some of the traits of this class in terms
+       * of its run-time behavior. Some other classes (such as the block
+       * matrix classes) that take one or the other of the matrix classes as
+       * their template parameters can tune their behavior based on the
+       * variables in this class.
+       */
+      struct Traits
+      {
+        /**
+         * It is not safe to elide additions of zeros to individual elements
+         * of this matrix. The reason is that additions to the matrix may
+         * trigger collective operations synchronizing buffers on multiple
+         * processes. If an addition is elided on one process, this may lead
+         * to other processes hanging in an infinite waiting loop.
+         */
+        static const bool zero_addition_can_be_elided = false;
+      };
+
+      /**
+       * Default constructor. Create an empty matrix.
+       */
+      SparseMatrix();
+
+      /**
+       * Destructor to free the PETSc object.
+       */
+      ~SparseMatrix() override;
+
+      /**
+       * Create a sparse matrix of dimensions @p m times @p n, with an initial
+       * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
+       * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
+       * function for more information about these parameters). PETSc is able
+       * to cope with the situation that more than this number of elements are
+       * later allocated for a row, but this involves copying data, and is
+       * thus expensive.
+       *
+       * For the meaning of the @p local_rows and @p local_columns parameters,
+       * see the class documentation.
+       *
+       * The @p is_symmetric flag determines whether we should tell PETSc that
+       * the matrix is going to be symmetric (as indicated by the call
+       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
+       * documentation states that one cannot form an ILU decomposition of a
+       * matrix for which this flag has been set to @p true, only an ICC. The
+       * default value of this flag is @p false.
+       *
+       * @deprecated This constructor is deprecated: please use the
+       * constructor with a sparsity pattern argument instead.
+       */
+      DEAL_II_DEPRECATED
+      SparseMatrix(const MPI_Comm &communicator,
+                   const size_type m,
+                   const size_type n,
+                   const size_type local_rows,
+                   const size_type local_columns,
+                   const size_type n_nonzero_per_row,
+                   const bool      is_symmetric              = false,
+                   const size_type n_offdiag_nonzero_per_row = 0);
+
+      /**
+       * Initialize a rectangular matrix with @p m rows and @p n columns. The
+       * maximal number of nonzero entries for the diagonal and off-diagonal
+       * blocks of each row is given by the @p row_lengths and @p
+       * offdiag_row_lengths arrays.
+       *
+       * For the meaning of the @p local_rows and @p local_columns parameters,
+       * see the class documentation.
+       *
+       * Just as for the other constructors: PETSc is able to cope with the
+       * situation that more than this number of elements are later allocated
+       * for a row, but this involves copying data, and is thus expensive.
+       *
+       * The @p is_symmetric flag determines whether we should tell PETSc that
+       * the matrix is going to be symmetric (as indicated by the call
+       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
+       * documentation states that one cannot form an ILU decomposition of a
+       * matrix for which this flag has been set to @p true, only an ICC. The
+       * default value of this flag is @p false.
+       *
+       * @deprecated This constructor is deprecated: please use the
+       * constructor with a sparsity pattern argument instead.
+       */
+      DEAL_II_DEPRECATED
+      SparseMatrix(const MPI_Comm &              communicator,
+                   const size_type               m,
+                   const size_type               n,
+                   const size_type               local_rows,
+                   const size_type               local_columns,
+                   const std::vector<size_type> &row_lengths,
+                   const bool                    is_symmetric = false,
+                   const std::vector<size_type> &offdiag_row_lengths =
+                     std::vector<size_type>());
+
+      /**
+       * Initialize using the given sparsity pattern with communication
+       * happening over the provided @p communicator.
+       *
+       * For the meaning of the @p local_rows_per_process and @p
+       * local_columns_per_process parameters, see the class documentation.
+       *
+       * Note that PETSc can be very slow if you do not provide it with a good
+       * estimate of the lengths of rows. Using the present function is a very
+       * efficient way to do this, as it uses the exact number of nonzero
+       * entries for each row of the matrix by using the given sparsity
+       * pattern argument. If the @p preset_nonzero_locations flag is @p true,
+       * this function not only sets the correct row sizes up front, but also
+       * pre-allocates the correct nonzero entries in the matrix.
+       *
+       * PETSc allows you to add further nonzero entries to a matrix later on,
+       * simply by writing to these elements. However, this will then lead to
+       * additional memory allocations which are very inefficient and will
+       * greatly slow down your program. It is therefore significantly more
+       * efficient to get memory allocation right from the start.
+       */
+      template <typename SparsityPatternType>
+      SparseMatrix(const MPI_Comm &              communicator,
+                   const SparsityPatternType &   sparsity_pattern,
+                   const std::vector<size_type> &local_rows_per_process,
+                   const std::vector<size_type> &local_columns_per_process,
+                   const unsigned int            this_process,
+                   const bool preset_nonzero_locations = true);
+
+      /**
+       * This operator assigns a scalar to a matrix. Since this usually does
+       * not make much sense (should we set all matrix entries to this value?
+       * Only the nonzero entries of the sparsity pattern?), this operation is
+       * only allowed if the actual value to be assigned is zero. This
+       * operator only exists to allow for the obvious notation
+       * <tt>matrix=0</tt>, which sets all elements of the matrix to zero but
+       * keeps the sparsity pattern previously used.
+       */
+      SparseMatrix &
+      operator=(const value_type d);
+
+
+      /**
+       * Make a copy of the PETSc matrix @p other. It is assumed that both
+       * matrices have the same SparsityPattern.
+       */
+      void
+      copy_from(const SparseMatrix &other);
+
+      /**
+       * Throw away the present matrix and generate one that has the same
+       * properties as if it were created by the constructor of this class
+       * with the same argument list as the present function.
+       *
+       * @deprecated This overload of <code>reinit</code> is deprecated:
+       * please use the overload with a sparsity pattern argument instead.
+       */
+      DEAL_II_DEPRECATED
+      void
+      reinit(const MPI_Comm &communicator,
+             const size_type m,
+             const size_type n,
+             const size_type local_rows,
+             const size_type local_columns,
+             const size_type n_nonzero_per_row,
+             const bool      is_symmetric              = false,
+             const size_type n_offdiag_nonzero_per_row = 0);
+
+      /**
+       * Throw away the present matrix and generate one that has the same
+       * properties as if it were created by the constructor of this class
+       * with the same argument list as the present function.
+       *
+       * @deprecated This overload of <code>reinit</code> is deprecated:
+       * please use the overload with a sparsity pattern argument instead.
+       */
+      DEAL_II_DEPRECATED
+      void
+      reinit(const MPI_Comm &              communicator,
+             const size_type               m,
+             const size_type               n,
+             const size_type               local_rows,
+             const size_type               local_columns,
+             const std::vector<size_type> &row_lengths,
+             const bool                    is_symmetric = false,
+             const std::vector<size_type> &offdiag_row_lengths =
+               std::vector<size_type>());
+
+      /**
+       * Initialize using the given sparsity pattern with communication
+       * happening over the provided @p communicator.
+       *
+       * Note that PETSc can be very slow if you do not provide it with a good
+       * estimate of the lengths of rows. Using the present function is a very
+       * efficient way to do this, as it uses the exact number of nonzero
+       * entries for each row of the matrix by using the given sparsity
+       * pattern argument. If the @p preset_nonzero_locations flag is @p true,
+       * this function not only sets the correct row sizes up front, but also
+       * pre-allocates the correct nonzero entries in the matrix.
+       *
+       * PETSc allows you to add further nonzero entries to a matrix later on,
+       * simply by writing to these elements. However, this will then lead to
+       * additional memory allocations which are very inefficient and will
+       * greatly slow down your program. It is therefore significantly more
+       * efficient to get memory allocation right from the start.
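+       *
+       * A rough usage sketch (assuming the sparsity pattern @p dsp and the
+       * two vectors of per-process row and column counts have already been
+       * computed and are available on every process; all names are
+       * placeholders):
+       * @code
+       *   matrix.reinit(mpi_communicator,
+       *                 dsp,
+       *                 rows_per_process,
+       *                 columns_per_process,
+       *                 this_mpi_process,
+       *                 true);
+       * @endcode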
+       */
+      template <typename SparsityPatternType>
+      void
+      reinit(const MPI_Comm &              communicator,
+             const SparsityPatternType &   sparsity_pattern,
+             const std::vector<size_type> &local_rows_per_process,
+             const std::vector<size_type> &local_columns_per_process,
+             const unsigned int            this_process,
+             const bool                    preset_nonzero_locations = true);
+
+      /**
+       * Create a matrix where the size() of the IndexSets determine the
+       * global number of rows and columns and the entries of the IndexSet
+       * give the rows and columns for the calling processor. Note that only
+       * ascending, 1:1 IndexSets are supported.
+       */
+      template <typename SparsityPatternType>
+      void
+      reinit(const IndexSet &           local_rows,
+             const IndexSet &           local_columns,
+             const SparsityPatternType &sparsity_pattern,
+             const MPI_Comm &           communicator);
+
+      /**
+       * Initialize this matrix to have the same structure as @p other. This
+       * will not copy the values of the other matrix, but you can use
+       * copy_from() for this.
+       */
+      void
+      reinit(const SparseMatrix &other);
+
+      /**
+       * Return a reference to the MPI communicator object in use with this
+       * matrix.
+       */
+      virtual const MPI_Comm &
+      get_mpi_communicator() const override;
+
+      /**
+       * @addtogroup Exceptions
+       * @{
+       */
+      /**
+       * Exception
+       */
+      DeclException2(ExcLocalRowsTooLarge,
+                     int,
+                     int,
+                     << "The number of local rows " << arg1
+                     << " must be larger than the total number of rows "
+                     << arg2);
+      //@}
+
+      /**
+       * Return the square of the norm of the vector $v$ with respect to the
+       * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is
+       * useful, e.g. in the finite element context, where the $L_2$ norm of a
+       * function equals the matrix norm with respect to the mass matrix of
+       * the vector representing the nodal values of the finite element
+       * function.
+       *
+       * Obviously, the matrix needs to be square for this operation.
+       *
+       * The implementation of this function is not as efficient as the one in
+       * the @p MatrixBase class used in deal.II (i.e. the original one, not
+       * the PETSc wrapper class) since PETSc doesn't support this operation
+       * and needs a temporary vector.
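+       *
+       * For example (a sketch; @p mass_matrix and @p nodal_values are
+       * placeholders for an already assembled mass matrix and a compatible
+       * vector):
+       * @code
+       *   const PetscScalar norm_sqr =
+       *     mass_matrix.matrix_norm_square(nodal_values);
+       * @endcode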
+       */
+      PetscScalar
+      matrix_norm_square(const Vector &v) const;
+
+      /**
+       * Compute the matrix scalar product $\left(u^\ast,Mv\right)$.
+       *
+       * The implementation of this function is not as efficient as the one in
+       * the @p MatrixBase class used in deal.II (i.e. the original one, not
+       * the PETSc wrapper class) since PETSc doesn't support this operation
+       * and needs a temporary vector.
+       */
+      PetscScalar
+      matrix_scalar_product(const Vector &u, const Vector &v) const;
+
+      /**
+       * Return the partitioning of the domain space of this matrix, i.e., the
+       * partitioning of the vectors this matrix has to be multiplied with.
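+       *
+       * For example, this can be used to create a vector that this matrix
+       * can be multiplied with (a sketch):
+       * @code
+       *   PETScWrappers::MPI::Vector x(matrix.locally_owned_domain_indices(),
+       *                                matrix.get_mpi_communicator());
+       * @endcode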
+       */
+      IndexSet
+      locally_owned_domain_indices() const;
+
+      /**
+       * Return the partitioning of the range space of this matrix, i.e., the
+       * partitioning of the vectors that result from matrix-vector
+       * products.
+       */
+      IndexSet
+      locally_owned_range_indices() const;
+
+      /**
+       * Perform the matrix-matrix multiplication $C = AB$, or,
+       * $C = A \text{diag}(V) B$ given a compatible vector $V$.
+       *
+       * This function calls MatrixBase::mmult() to do the actual work.
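+       *
+       * A minimal sketch (assuming @p A and @p B are already initialized,
+       * compatible matrices of this type):
+       * @code
+       *   PETScWrappers::MPI::SparseMatrix C;
+       *   A.mmult(C, B); // C = A * B
+       * @endcode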
+       */
+      void
+      mmult(SparseMatrix &      C,
+            const SparseMatrix &B,
+            const MPI::Vector & V = MPI::Vector()) const;
+
+      /**
+       * Perform the matrix-matrix multiplication with the transpose of
+       * <tt>this</tt>, i.e., $C = A^T B$, or,
+       * $C = A^T \text{diag}(V) B$ given a compatible vector $V$.
+       *
+       * This function calls MatrixBase::Tmmult() to do the actual work.
+       */
+      void
+      Tmmult(SparseMatrix &      C,
+             const SparseMatrix &B,
+             const MPI::Vector & V = MPI::Vector()) const;
+
+    private:
+      /**
+       * Copy of the communicator object to be used for this parallel matrix.
+       */
+      MPI_Comm communicator;
+
+      /**
+       * Do the actual work for the respective reinit() function and the
+       * matching constructor, i.e. create a matrix. Getting rid of the
+       * previous matrix is left to the caller.
+       *
+       * @deprecated This overload of <code>do_reinit</code> is deprecated:
+       * please use the overload with a sparsity pattern argument instead.
+       */
+      DEAL_II_DEPRECATED
+      void
+      do_reinit(const size_type m,
+                const size_type n,
+                const size_type local_rows,
+                const size_type local_columns,
+                const size_type n_nonzero_per_row,
+                const bool      is_symmetric              = false,
+                const size_type n_offdiag_nonzero_per_row = 0);
+
+      /**
+       * Same as previous function.
+       *
+       * @deprecated This overload of <code>do_reinit</code> is deprecated:
+       * please use the overload with a sparsity pattern argument instead.
+       */
+      DEAL_II_DEPRECATED
+      void
+      do_reinit(const size_type               m,
+                const size_type               n,
+                const size_type               local_rows,
+                const size_type               local_columns,
+                const std::vector<size_type> &row_lengths,
+                const bool                    is_symmetric = false,
+                const std::vector<size_type> &offdiag_row_lengths =
+                  std::vector<size_type>());
+
+      /**
+       * Same as previous functions.
+       */
+      template <typename SparsityPatternType>
+      void
+      do_reinit(const SparsityPatternType &   sparsity_pattern,
+                const std::vector<size_type> &local_rows_per_process,
+                const std::vector<size_type> &local_columns_per_process,
+                const unsigned int            this_process,
+                const bool                    preset_nonzero_locations);
+
+      /**
+       * Same as previous functions.
+       */
+      template <typename SparsityPatternType>
+      void
+      do_reinit(const IndexSet &           local_rows,
+                const IndexSet &           local_columns,
+                const SparsityPatternType &sparsity_pattern);
+
+      /**
+       * To allow calling protected prepare_add() and prepare_set().
+       */
+      friend class BlockMatrixBase<SparseMatrix>;
+    };
+
+
+
+    // -------- template and inline functions ----------
+
+    inline const MPI_Comm &
+    SparseMatrix::get_mpi_communicator() const
+    {
+      return communicator;
+    }
+  } // namespace MPI
 } // namespace PETScWrappers
 
 DEAL_II_NAMESPACE_CLOSE
diff --git a/include/deal.II/lac/petsc_vector.h b/include/deal.II/lac/petsc_vector.h
new file mode 100644 (file)
index 0000000..1c43760
--- /dev/null
@@ -0,0 +1,573 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_vector_h
+#  define dealii_petsc_vector_h
+
+
+#  include <deal.II/base/config.h>
+
+#  ifdef DEAL_II_WITH_PETSC
+
+#    include <deal.II/base/index_set.h>
+#    include <deal.II/base/subscriptor.h>
+
+#    include <deal.II/lac/exceptions.h>
+#    include <deal.II/lac/petsc_vector_base.h>
+#    include <deal.II/lac/vector.h>
+#    include <deal.II/lac/vector_operation.h>
+#    include <deal.II/lac/vector_type_traits.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+/*! @addtogroup PETScWrappers
+ *@{
+ */
+namespace PETScWrappers
+{
+  /**
+   * Namespace for PETSc classes that work in parallel over MPI, such as
+   * distributed vectors and matrices.
+   *
+   * @ingroup PETScWrappers
+   * @author Wolfgang Bangerth, 2004
+   */
+  namespace MPI
+  {
+    /**
+     * Implementation of a parallel vector class based on PETSc and using MPI
+     * communication to synchronize distributed operations. All the
+     * functionality is actually in the base class, except for the calls to
+     * generate a parallel vector. This is possible since PETSc only works on
+     * an abstract vector type and internally distributes to functions that do
+     * the actual work depending on the actual vector type (much like using
+     * virtual functions). Only the functions creating a vector of specific
+     * type differ, and are implemented in this particular class.
+     *
+     *
+     * <h3>Parallel communication model</h3>
+     *
+     * The parallel functionality of PETSc is built on top of the Message
+     * Passing Interface (MPI). MPI's communication model is built on
+     * collective communications: if one process wants something from another,
+     * that other process has to be willing to accept this communication. A
+     * process cannot query data from another process by calling a remote
+     * function, without that other process expecting such a transaction. The
+     * consequence is that most of the operations in the base class of this
+     * class have to be called collectively. For example, if you want to
+     * compute the l2 norm of a parallel vector, @em all processes across
+     * which this vector is shared have to call the @p l2_norm function. If
+     * you don't do this, but instead only call the @p l2_norm function on one
+     * process, then the following happens: This one process will call one of
+     * the collective MPI functions and wait for all the other processes to
+     * join in on this. Since the other processes don't call this function,
+     * you will either get a time-out on the first process, or, worse, by the
+     * time the next call to a PETSc function on the other processes generates
+     * an MPI message, you will get a cryptic message that only a subset
+     * of processes attempted a communication. These bugs can be very hard to
+     * figure out, unless you are well-acquainted with the communication model
+     * of MPI, and know which functions may generate MPI messages.
+     *
+     * One particular case where an MPI message may be generated unexpectedly
+     * is discussed below.
+     *
+     *
+     * <h3>Accessing individual elements of a vector</h3>
+     *
+     * PETSc does allow read access to individual elements of a vector, but in
+     * the distributed case only to elements that are stored locally. We
+     * implement this through calls like <tt>d=vec(i)</tt>. However, if you
+     * access an element outside the locally stored range, an exception is
+     * generated.
+     *
+     * In contrast to read access, PETSc (and the respective deal.II wrapper
+     * classes) allows writing (or adding) to individual elements of vectors,
+     * even if they are stored on a different process. You can do this by
+     * writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or
+     * similar operations. There is one catch, however, that may lead to very
+     * confusing error messages: PETSc requires application programs to call
+     * the compress() function when they switch from adding to elements to
+     * writing to elements. The reasoning is that all processes might
+     * accumulate addition operations to elements, even if multiple processes
+     * write to the same elements. By the time we call compress() the next
+     * time, all these additions are executed. However, if one process adds to
+     * an element and another overwrites it, the order of execution would
+     * yield non-deterministic behavior if we don't make sure that a
+     * synchronization with compress() happens in between.
+     *
+     * In order to make sure these calls to compress() happen at the
+     * appropriate time, the deal.II wrappers keep a state variable that
+     * stores which operation is presently allowed: additions or writes. If it
+     * encounters an operation of the opposite kind, it calls compress() and
+     * flips the state. This can sometimes lead to very confusing behavior, in
+     * code that may for example look like this:
+     * @code
+     *   PETScWrappers::MPI::Vector vector;
+     *   ...
+     *   // do some write operations on the vector
+     *   for (unsigned int i=0; i<vector.size(); ++i)
+     *     vector(i) = i;
+     *
+     *   // do some additions to vector elements, but only for some elements
+     *   for (unsigned int i=0; i<vector.size(); ++i)
+     *     if (some_condition(i) == true)
+     *       vector(i) += 1;
+     *
+     *   // do another collective operation
+     *   const double norm = vector.l2_norm();
+     * @endcode
+     *
+     * This code can run into trouble: by the time we see the first addition
+     * operation, we need to flush the overwrite buffers for the vector, and
+     * the deal.II library will do so by calling compress(). However, it will
+     * do so only on those processes that actually perform an addition -- if
+     * the condition is never true for one of the processes, then this one
+     * will not get to the actual compress() call, whereas all the other ones
+     * do. This gets us into trouble, since all the other processes hang in
+     * the call to flush the write buffers, while the one remaining process
+     * advances to the call that computes the l2 norm. At this time, you will
+     * get an error
+     * that some operation was attempted by only a subset of processes. This
+     * behavior may seem surprising, unless you know that write/addition
+     * operations on single elements may trigger this behavior.
+     *
+     * The problem described here may be avoided by placing additional calls
+     * to compress(), or making sure that all processes do the same type of
+     * operations at the same time, for example by placing zero additions if
+     * necessary.
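+     *
+     * For example, the conditional loop above could be rewritten so that
+     * every process performs an addition (adding zero where the condition
+     * does not hold) and the vector is compressed explicitly before the
+     * collective call (a sketch reusing the hypothetical @p some_condition()
+     * from above):
+     * @code
+     *   for (unsigned int i=0; i<vector.size(); ++i)
+     *     vector(i) += (some_condition(i) == true ? 1 : 0);
+     *
+     *   vector.compress(VectorOperation::add);
+     *
+     *   const double norm = vector.l2_norm();
+     * @endcode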
+     *
+     * @see
+     * @ref GlossGhostedVector "vectors with ghost elements"
+     *
+     * @ingroup PETScWrappers
+     * @ingroup Vectors
+     * @author Wolfgang Bangerth, 2004
+     */
+    class Vector : public VectorBase
+    {
+    public:
+      /**
+       * Declare type for container size.
+       */
+      using size_type = types::global_dof_index;
+
+      /**
+       * Default constructor. Initialize the vector as empty.
+       */
+      Vector();
+
+      /**
+       * Constructor. Set dimension to @p n and initialize all elements with
+       * zero.
+       *
+       * @arg local_size denotes the size of the chunk that shall be stored on
+       * the present process.
+       *
+       * @arg communicator denotes the MPI communicator over which the
+       * different parts of the vector shall communicate
+       *
+       * The constructor is made explicit to avoid accidents like this:
+       * <tt>v=0;</tt>. Presumably, the user wants to set every element of the
+       * vector to zero, but instead, what happens is this call:
+       * <tt>v=Vector@<number@>(0);</tt>, i.e. the vector is replaced by one
+       * of length zero.
+       */
+      explicit Vector(const MPI_Comm &communicator,
+                      const size_type n,
+                      const size_type local_size);
+
+
+      /**
+       * Copy-constructor from deal.II vectors. Sets the dimension to that of
+       * the given vector, and copies all elements.
+       *
+       * @arg local_size denotes the size of the chunk that shall be stored on
+       * the present process.
+       *
+       * @arg communicator denotes the MPI communicator over which the
+       * different parts of the vector shall communicate
+       */
+      template <typename Number>
+      explicit Vector(const MPI_Comm &              communicator,
+                      const dealii::Vector<Number> &v,
+                      const size_type               local_size);
+
+
+      /**
+       * Copy-constructor that copies the values from a PETSc wrapper vector
+       * class.
+       *
+       * @arg local_size denotes the size of the chunk that shall be stored on
+       * the present process.
+       *
+       * @arg communicator denotes the MPI communicator over which the
+       * different parts of the vector shall communicate
+       *
+       * @deprecated The use of objects that are explicitly of type VectorBase
+       * is deprecated: use PETScWrappers::MPI::Vector instead.
+       */
+      DEAL_II_DEPRECATED
+      explicit Vector(const MPI_Comm &  communicator,
+                      const VectorBase &v,
+                      const size_type   local_size);
+
+      /**
+       * Construct a new parallel ghosted PETSc vector from IndexSets.
+       *
+       * Note that @p local must be ascending and 1:1, see
+       * IndexSet::is_ascending_and_one_to_one().  In particular, the DoFs in
+       * @p local need to be contiguous, meaning you can only create vectors
+       * from a DoFHandler with several finite element components if they are
+       * not reordered by component (use a PETScWrappers::BlockVector
+       * otherwise).  The global size of the vector is determined by
+       * local.size(). The global indices in @p ghost are supplied as ghost
+       * indices so that they can be read locally.
+       *
+       * Note that the @p ghost IndexSet may be empty and that any indices
+       * already contained in @p local are ignored during construction. That
+       * way, the ghost parameter can equal the set of locally relevant
+       * degrees of freedom, see step-32.
+       *
+       * @note This operation always creates a ghosted vector, which is considered
+       * read-only.
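+       *
+       * A typical way to set up such a vector (a sketch; @p dof_handler and
+       * @p mpi_communicator are placeholders assumed to exist already) is:
+       * @code
+       *   const IndexSet locally_owned = dof_handler.locally_owned_dofs();
+       *   IndexSet       locally_relevant;
+       *   DoFTools::extract_locally_relevant_dofs(dof_handler,
+       *                                           locally_relevant);
+       *
+       *   PETScWrappers::MPI::Vector ghosted(locally_owned,
+       *                                      locally_relevant,
+       *                                      mpi_communicator);
+       * @endcode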
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      Vector(const IndexSet &local,
+             const IndexSet &ghost,
+             const MPI_Comm &communicator);
+
+      /**
+       * Construct a new parallel PETSc vector without ghost elements from an
+       * IndexSet.
+       *
+       * Note that @p local must be ascending and 1:1, see
+       * IndexSet::is_ascending_and_one_to_one().  In particular, the DoFs in
+       * @p local need to be contiguous, meaning you can only create vectors
+       * from a DoFHandler with several finite element components if they are
+       * not reordered by component (use a PETScWrappers::BlockVector
+       * otherwise).
+       */
+      explicit Vector(const IndexSet &local, const MPI_Comm &communicator);
+
+      /**
+       * Release all memory and return to a state just like after having
+       * called the default constructor.
+       */
+      virtual void
+      clear() override;
+
+      /**
+       * Copy the given vector. Resize the present vector if necessary. Also
+       * take over the MPI communicator of @p v.
+       */
+      Vector &
+      operator=(const Vector &v);
+
+      /**
+       * Set all components of the vector to the given number @p s. Simply
+       * pass this down to the base class, but we still need to declare this
+       * function to make the example given in the discussion about making the
+       * constructor explicit work.
+       */
+      Vector &
+      operator=(const PetscScalar s);
+
+      /**
+       * Copy the values of a deal.II vector (as opposed to those of the PETSc
+       * vector wrapper class) into this object.
+       *
+       * Contrary to the case of sequential vectors, this operator requires
+       * that the present vector already has the correct size, since we need
+       * to have a partitioning and a communicator present, which we otherwise
+       * can't get from the source vector.
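+       *
+       * A minimal sketch (with @p n, @p local_size, and @p mpi_communicator
+       * as placeholders):
+       * @code
+       *   dealii::Vector<double> serial(n);
+       *   // ...fill the serial vector on every process...
+       *
+       *   PETScWrappers::MPI::Vector parallel(mpi_communicator, n, local_size);
+       *   parallel = serial; // sizes already match, so this is allowed
+       * @endcode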
+       */
+      template <typename number>
+      Vector &
+      operator=(const dealii::Vector<number> &v);
+
+      /**
+       * Change the dimension of the vector to @p N. It is unspecified how
+       * resizing the vector affects the memory allocation of this object;
+       * i.e., it is not guaranteed that resizing it to a smaller size
+       * actually also reduces memory consumption, or if for efficiency the
+       * same amount of memory is used for less data.
+       *
+       * @p local_size denotes how many of the @p N values shall be stored
+       * locally on the present process.
+       *
+       * @p communicator denotes the MPI communicator henceforth to be used
+       * for this vector.
+       *
+       * If @p omit_zeroing_entries is false, the vector is filled with zeros.
+       * Otherwise, the elements are left in an unspecified state.
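+       *
+       * For example (a sketch; the arguments are placeholders):
+       * @code
+       *   v.reinit(mpi_communicator, n_global, n_local);
+       * @endcode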
+       */
+      void
+      reinit(const MPI_Comm &communicator,
+             const size_type N,
+             const size_type local_size,
+             const bool      omit_zeroing_entries = false);
+
+      /**
+       * Change the dimension to that of the vector @p v, and also take over
+       * the partitioning into local sizes as well as the MPI communicator.
+       * The same applies as for the other @p reinit function.
+       *
+       * The elements of @p v are not copied, i.e. this function is the same
+       * as calling <tt>reinit(v.size(), v.local_size(),
+       * omit_zeroing_entries)</tt>.
+       */
+      void
+      reinit(const Vector &v, const bool omit_zeroing_entries = false);
+
+      /**
+       * Reinit as a vector with ghost elements. See the constructor with
+       * same signature for more details.
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      void
+      reinit(const IndexSet &local,
+             const IndexSet &ghost,
+             const MPI_Comm &communicator);
+
+      /**
+       * Reinit as a vector without ghost elements. See constructor with same
+       * signature for more details.
+       *
+       * @see
+       * @ref GlossGhostedVector "vectors with ghost elements"
+       */
+      void
+      reinit(const IndexSet &local, const MPI_Comm &communicator);
+
+      /**
+       * Return a reference to the MPI communicator object in use with this
+       * vector.
+       */
+      const MPI_Comm &
+      get_mpi_communicator() const override;
+
+      /**
+       * Print to a stream. @p precision denotes the desired precision with
+       * which values shall be printed, @p scientific whether scientific
+       * notation shall be used. If @p across is @p true then the vector is
+       * printed in a line, while if @p false then the elements are printed on
+       * a separate line each.
+       *
+       * @note This function overloads the one in the base class to ensure
+       * that the right thing happens for parallel vectors that are
+       * distributed across processors.
+       */
+      void
+      print(std::ostream &     out,
+            const unsigned int precision  = 3,
+            const bool         scientific = true,
+            const bool         across     = true) const;
+
+      /**
+       * @copydoc PETScWrappers::VectorBase::all_zero()
+       *
+       * @note This function overloads the one in the base class to make this
+       * a collective operation.
+       */
+      bool
+      all_zero() const;
+
+    protected:
+      /**
+       * Create a vector of length @p n. For this class, we create a parallel
+       * vector. @p n denotes the total size of the vector to be created. @p
+       * local_size denotes how many of these elements shall be stored
+       * locally.
+       */
+      virtual void
+      create_vector(const size_type n, const size_type local_size);
+
+
+
+      /**
+       * Create a vector of global length @p n, local size @p local_size and
+       * with the specified ghost indices. Note that you need to call
+       * update_ghost_values() before accessing those.
+       */
+      virtual void
+      create_vector(const size_type n,
+                    const size_type local_size,
+                    const IndexSet &ghostnodes);
+
+
+    private:
+      /**
+       * Copy of the communicator object to be used for this parallel vector.
+       */
+      MPI_Comm communicator;
+    };
+
+
+    // ------------------ template and inline functions -------------
+
+
+    /**
+     * Global function @p swap which overloads the default implementation of
+     * the C++ standard library which uses a temporary object. The function
+     * simply exchanges the data of the two vectors.
+     *
+     * @relatesalso PETScWrappers::MPI::Vector
+     * @author Wolfgang Bangerth, 2004
+     */
+    inline void
+    swap(Vector &u, Vector &v)
+    {
+      u.swap(v);
+    }
+
+
+#    ifndef DOXYGEN
+
+    template <typename number>
+    Vector::Vector(const MPI_Comm &              communicator,
+                   const dealii::Vector<number> &v,
+                   const size_type               local_size)
+      : communicator(communicator)
+    {
+      Vector::create_vector(v.size(), local_size);
+
+      *this = v;
+    }
+
+
+
+    inline Vector &
+    Vector::operator=(const PetscScalar s)
+    {
+      VectorBase::operator=(s);
+
+      return *this;
+    }
+
+
+
+    template <typename number>
+    inline Vector &
+    Vector::operator=(const dealii::Vector<number> &v)
+    {
+      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
+
+      // FIXME: the following isn't necessarily fast, but this is due to
+      // the fact that PETSc doesn't offer an inlined access operator.
+      //
+      // if someone wants to contribute some code: to make this code
+      // faster, one could either first convert all values to PetscScalar,
+      // and then set them all at once using VecSetValues. This has the
+      // drawback that it could take quite some memory, if the vector is
+      // large, and it would in addition allocate memory on the heap, which
+      // is expensive. an alternative would be to split the vector into
+      // chunks of, say, 128 elements, convert a chunk at a time and set it
+      // in the output vector using VecSetValues. since 128 elements is
+      // small enough, this could easily be allocated on the stack (as a
+      // local variable) which would make the whole thing much more
+      // efficient.
+      //
+      // a second way to make things faster is for the special case that
+      // number==PetscScalar. we could then declare a specialization of
+      // this template, and omit the conversion. the problem with this is
+      // that the best we can do is to use VecSetValues, but this isn't
+      // very efficient either: it wants to see an array of indices, which
+      // in this case a) again takes up a whole lot of memory on the heap,
+      // and b) is totally dumb since its content would simply be the
+      // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
+      // function in Petsc that would take a pointer to an array of
+      // PetscScalar values and simply copy n elements verbatim into the
+      // vector...
+      for (size_type i = 0; i < v.size(); ++i)
+        (*this)(i) = v(i);
+
+      compress(::dealii::VectorOperation::insert);
+
+      return *this;
+    }
+
+
+
+    inline const MPI_Comm &
+    Vector::get_mpi_communicator() const
+    {
+      return communicator;
+    }
+
+#    endif // DOXYGEN
+  }        // namespace MPI
+} // namespace PETScWrappers
+
+namespace internal
+{
+  namespace LinearOperatorImplementation
+  {
+    template <typename>
+    class ReinitHelper;
+
+    /**
+     * A helper class used internally in linear_operator.h. Specialization for
+     * PETScWrappers::MPI::Vector.
+     */
+    template <>
+    class ReinitHelper<PETScWrappers::MPI::Vector>
+    {
+    public:
+      template <typename Matrix>
+      static void
+      reinit_range_vector(const Matrix &              matrix,
+                          PETScWrappers::MPI::Vector &v,
+                          bool /*omit_zeroing_entries*/)
+      {
+        v.reinit(matrix.locally_owned_range_indices(),
+                 matrix.get_mpi_communicator());
+      }
+
+      template <typename Matrix>
+      static void
+      reinit_domain_vector(const Matrix &              matrix,
+                           PETScWrappers::MPI::Vector &v,
+                           bool /*omit_zeroing_entries*/)
+      {
+        v.reinit(matrix.locally_owned_domain_indices(),
+                 matrix.get_mpi_communicator());
+      }
+    };
+
+  } // namespace LinearOperatorImplementation
+} /* namespace internal */
+
+/**@}*/
+
+
+/**
+ * Declare dealii::PETScWrappers::MPI::Vector as distributed vector.
+ *
+ * @author Uwe Koecher, 2017
+ */
+template <>
+struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
+{};
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#  endif // DEAL_II_WITH_PETSC
+
+#endif
+/*------------------------- petsc_vector.h -------------------------*/
index 7d86d56caed088b9acd8b86e085fa845728c6a30..c5792d876d0c6ca3b57f5530ca5c7eb2001e1afb 100644 (file)
@@ -29,7 +29,7 @@
 #include <boost/io/ios_state.hpp>
 
 #ifdef DEAL_II_WITH_PETSC
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_vector.h>
 #endif
 
 #ifdef DEAL_II_WITH_TRILINOS
index e5f1752785e3c0361df88f7dfe139b0d743c6642..0faa248c39f90d4a1b618ac26c803c3093c9c3f6 100644 (file)
@@ -26,8 +26,8 @@
 #include <deal.II/lac/block_sparsity_pattern.h>
 #include <deal.II/lac/block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/trilinos_sparse_matrix.h>
 #include <deal.II/lac/vector_memory.h>
index da3c695f76dcc73eac68a127405f2478a3921aed..882b6c5745941539106c4d181922146841617182 100644 (file)
@@ -25,7 +25,7 @@
 
 #include <deal.II/grid/tria_iterator.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/trilinos_epetra_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 2b8175afc7bc96c850258dbdc8806d483d2e1bf6..f5252acd04293aa9a6250555378fde63656bebf7 100644 (file)
@@ -43,8 +43,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 43addb419bc3e16d208a88f148dfe515ee521c9b..092507fc849bbeb36e577479adc5c63e2664a761 100644 (file)
 #include <deal.II/numerics/matrix_tools.h>
 
 #ifdef DEAL_II_WITH_PETSC
-#  include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#  include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_block_sparse_matrix.h>
 #  include <deal.II/lac/petsc_sparse_matrix.h>
+#  include <deal.II/lac/petsc_vector.h>
 #endif
 
 #ifdef DEAL_II_WITH_TRILINOS
index d43169b8fdb4ad6cf794c3a512560c2e61e5aef5..8980930b52bab6cf1c7f2c99bd9cb32a670328c3 100644 (file)
@@ -62,8 +62,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/solver_gmres.h>
index 983e3c567982dae06ba4bf6d0b76cd94321a017c..e8fb2b46961bbb8b177b1c732e61e2b50e3c5d76 100644 (file)
@@ -28,8 +28,8 @@
 #  include <deal.II/base/mpi.h>
 #  include <deal.II/base/parameter_handler.h>
 #  ifdef DEAL_II_WITH_PETSC
-#    include <deal.II/lac/petsc_parallel_block_vector.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_block_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 #  endif
 #  include <deal.II/lac/vector.h>
 #  include <deal.II/lac/vector_memory.h>
index 9971910b52469b5e340665ef9698fbedfdbba26b..f57d9795b1e94a7df5af2a8a0c97ce1b16eb4bed 100644 (file)
@@ -34,8 +34,8 @@
 #  endif
 
 #  ifdef DEAL_II_WITH_PETSC
-#    include <deal.II/lac/petsc_parallel_block_vector.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_block_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 #  endif
 
 DEAL_II_NAMESPACE_OPEN
index be8fcb2c61d0cdd867e5cc25a310e9401e896aff..969c0003935ad8bd11a1afd4d85e6459bc4f75f0 100644 (file)
@@ -27,8 +27,8 @@
 #  include <deal.II/base/mpi.h>
 #  include <deal.II/base/parameter_handler.h>
 #  ifdef DEAL_II_WITH_PETSC
-#    include <deal.II/lac/petsc_parallel_block_vector.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_block_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 #  endif
 #  include <deal.II/lac/vector.h>
 #  include <deal.II/lac/vector_memory.h>
index 715ce2b508460769cf7c7e3e90dde18d69b04041..6b86a1a213fdeeb759a1122c41775564a62fcdb0 100644 (file)
@@ -25,8 +25,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 88ab64ce44fee5745b3f3f8caa64ebdad9262d48..b7fa8393c274635c575008882c4d168e9820915d 100644 (file)
@@ -37,8 +37,8 @@
 #endif
 
 #ifdef DEAL_II_WITH_PETSC
-#  include <deal.II/lac/petsc_parallel_block_vector.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_block_vector.h>
+#  include <deal.II/lac/petsc_vector.h>
 
 #  include <petscsys.h>
 #endif
index a4efc6f7a7ba79369af376f05ae33c2bded67f61..9bb02320eb6317ae2f46fe6352fc14b355e29542 100644 (file)
@@ -18,8 +18,8 @@
 #include <deal.II/lac/block_vector.h>
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_epetra_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 3782c328de4f28d7f7030c5ac09c872219b90155..6fb228887267593c35eb1580751b03c6a04287cf 100644 (file)
@@ -30,8 +30,8 @@
 #  include <deal.II/lac/block_vector.h>
 #  include <deal.II/lac/la_parallel_block_vector.h>
 #  include <deal.II/lac/la_parallel_vector.h>
-#  include <deal.II/lac/petsc_parallel_block_vector.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_block_vector.h>
+#  include <deal.II/lac/petsc_vector.h>
 #  include <deal.II/lac/trilinos_parallel_block_vector.h>
 #  include <deal.II/lac/trilinos_vector.h>
 #  include <deal.II/lac/vector.h>
index 6b8aeabdfa0d51e6cb8e8d679d1ad3c8f1a1e5c3..4f1bdc2b03162341b6bd3a6e8be0fb687457a585 100644 (file)
@@ -28,8 +28,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 2c63ef879096bfc3d931d42a1eb9a06aec71eb96..556dcd48c8f756408cccad8f49c1d2ae26aa8117 100644 (file)
@@ -28,8 +28,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index ffcab279fbbbbbb4c6d213ecc149951c9e0c99af..8f17342c522e9b45cbeb2215a2c0debc302c0a16 100644 (file)
@@ -36,8 +36,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 199e98fa187d6266b2a3ea1cec970d3c2be416ac..3419ffc6627b1f0a183fe7ae8b2265485c7caa52 100644 (file)
@@ -41,8 +41,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index d7f10d14274f6fae894d0b89fae4999e7e4e2cb9..666a7bef6e04a4acb062eb2ba1737d04c70ec17e 100644 (file)
@@ -27,8 +27,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index ac2eb9afd5458894366099ac43f0ede438095a3e..c95ebb6981c66815928d63d440e39fd7f7ecde9b 100644 (file)
@@ -31,8 +31,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index eb2b25086cd305ae49371cab35a32292e4cadaf2..fd8408f507ecc71ba0d78ba29196f7b12afe45ca 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <deal.II/lac/block_matrix_array.h>
 #include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/vector.h>
 
index 8cd7b94b903723d0be59f3a2c93524062a3ea72c..7ec261dfd40aaecd4de8b1816a9ad61f2487f33e 100644 (file)
@@ -20,7 +20,6 @@
 #  include <deal.II/lac/exceptions.h>
 #  include <deal.II/lac/petsc_compatibility.h>
 #  include <deal.II/lac/petsc_full_matrix.h>
-#  include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 #  include <deal.II/lac/petsc_sparse_matrix.h>
 #  include <deal.II/lac/petsc_vector_base.h>
 
index e513192246fac1371e35dadbdb8e2d569e8c8fa4..da0e508e0716ccca8a5c2cf174c2aefe7f35a518 100644 (file)
@@ -13,7 +13,7 @@
 //
 // ---------------------------------------------------------------------
 
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
 
 #ifdef DEAL_II_WITH_PETSC
 
index b4e6d9ed5fb10d62f48d4de4cf2c5ab8fb05b7f0..11acbe1e1f56da8f69b55481fe7882b3dc55941e 100644 (file)
@@ -13,7 +13,7 @@
 //
 // ---------------------------------------------------------------------
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #ifdef DEAL_II_WITH_PETSC
 
index ae3f970d334457cda7a433533dd15aa0f8e50f38..88178237dd3deabb7b6a158d843805db120b5235 100644 (file)
@@ -13,7 +13,7 @@
 //
 // ---------------------------------------------------------------------
 
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
 
 #ifdef DEAL_II_WITH_PETSC
 
@@ -22,7 +22,7 @@
 #  include <deal.II/lac/dynamic_sparsity_pattern.h>
 #  include <deal.II/lac/exceptions.h>
 #  include <deal.II/lac/petsc_compatibility.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_vector.h>
 #  include <deal.II/lac/sparsity_pattern.h>
 
 DEAL_II_NAMESPACE_OPEN
index 22621dbc7dc0ae34a7613d521da0aaae6f11d679..96a3e3d1d04f038b9023abf9c4835093c167b7c5 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <deal.II/base/mpi.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #ifdef DEAL_II_WITH_PETSC
 
index 1a5b5843f8133d06b889fc49777129e0a21bab64..7688d61d839f72f54a96e8e7a0955d311ab324a6 100644 (file)
@@ -22,7 +22,7 @@
 
 #  include <deal.II/lac/exceptions.h>
 #  include <deal.II/lac/petsc_compatibility.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_vector.h>
 
 #  include <cmath>
 
index 2c9ae3a70859f67f803b563da8eff14106b86c4a..6d432ae184b8be2086068ff55c3f3d3502f35201 100644 (file)
@@ -18,8 +18,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index d1b5b74a00740db8448e96774ad3d1f002f26957..0b5e84c3ae45155fd1096dc50b4acf1af3961388 100644 (file)
@@ -18,8 +18,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index c481784f102685e213408658ec4122b012a56f43..bb7faf41c415c12f0517d3d3680291fbf6ecedcc 100644 (file)
@@ -18,8 +18,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 0b60cf2f0bf97e957674370753f2d0c804999871..8744c6cf40499804b08e3f6a4663ebf4bb3f947a 100644 (file)
@@ -17,7 +17,7 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index e98d0da4372b963fc0a5e31dd413629c033e2425..af0f49d958f384bb0d69186e6ea320308a359a31 100644 (file)
@@ -18,8 +18,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 23f520c46456d4081c8747ac6513f46d68bf30e6..380c081a874287ef9f327ca60c2ed22593f86e2f 100644 (file)
@@ -27,8 +27,7 @@
 
 #include <deal.II/lac/block_sparse_matrix.h>
 #include <deal.II/lac/block_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/sparsity_pattern.h>
index 9947e83fc8f29a071cb473aa5f0fa7f5d7ea405a..79b9a37ec89ae692c9749bb7ff412796a2b56d3e 100644 (file)
@@ -17,8 +17,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_epetra_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index a040fd05d1bc20203246ee7b00f40ea234ede955..e085c3d3825be4fcdebaefef73da53dd485dd4c3 100644 (file)
@@ -17,8 +17,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_epetra_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 39448e6bb2b9772b6b488487569d7ee9668a5296..a2a1311d6ad366d1d8c9a6fa4f033dc6b4935f03 100644 (file)
@@ -17,8 +17,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_epetra_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index ac0086e2f4bc9f9baf684adc7aec677ed18c1ab3..f4bdf5a23732b1670bf13c91f7983998868b27be 100644 (file)
@@ -36,8 +36,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index ce7c4375d05a0f0795af382aaa13e9467df8736d..431331c807df2f1eba5ea90339adeda7d8b6f5bb 100644 (file)
@@ -21,8 +21,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 824dea8568dfbe319c5383e58c29b678601946b1..eef1d7454a21f2aae67f0e2dbc5590239ce1070d 100644 (file)
@@ -39,8 +39,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 409f59745a3e62663dd93dd1a657b350a653f9a2..aa996989e7cb7a65fa0b752baf16b65d5ec65e3c 100644 (file)
@@ -23,8 +23,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 556d6825b8e81a28def08117c598ef2128a62b88..a5f62cd2236e6fa20c5469b37d4588d30b5a7a5a 100644 (file)
 #include <deal.II/numerics/matrix_tools.h>
 
 #ifdef DEAL_II_WITH_PETSC
-#  include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#  include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#  include <deal.II/lac/petsc_parallel_vector.h>
+#  include <deal.II/lac/petsc_block_sparse_matrix.h>
 #  include <deal.II/lac/petsc_sparse_matrix.h>
+#  include <deal.II/lac/petsc_vector.h>
 #endif
 
 #ifdef DEAL_II_WITH_TRILINOS
index 3f2e565e1984247fbf6057b125877e6359f7dddf..aded4396a49c5519c4f64bb4a0ff77e26bbde964 100644 (file)
@@ -40,9 +40,9 @@
 #include <deal.II/numerics/matrix_tools.h>
 
 #ifdef DEAL_II_WITH_PETSC
+#  include <deal.II/lac/petsc_block_sparse_matrix.h>
+#  include <deal.II/lac/petsc_block_vector.h>
 #  include <deal.II/lac/petsc_matrix_base.h>
-#  include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#  include <deal.II/lac/petsc_parallel_block_vector.h>
 #  include <deal.II/lac/petsc_vector_base.h>
 #endif
 
index 7055a47f764fb744158d5718ddc3f3c4dcd57cef..d082043b61056ff5e14a158cbf3f218e5c010c1b 100644 (file)
@@ -18,8 +18,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 4acd75edb05d76516d02760227b6d7498167e23d..9e77a70d0b13a1a4736b9ac91ac793697ee84605 100644 (file)
@@ -30,8 +30,8 @@
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/la_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/vector.h>
index 6a1b6547eedb43f9eae568d50a9668ba9d44a8ab..3f8397e7b62db98be3acd4ff95e8a14cea60fc9e 100644 (file)
@@ -28,8 +28,8 @@
 #    include <deal.II/lac/trilinos_vector.h>
 #  endif
 #  ifdef DEAL_II_WITH_PETSC
-#    include <deal.II/lac/petsc_parallel_block_vector.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_block_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 #  endif
 #  include <deal.II/base/utilities.h>
 
index 4c6902c04496fb03bedea5c414aca46f13c01673..f929cc4c8115c44fe3914126d75e1e03a0fc859c 100644 (file)
@@ -28,8 +28,8 @@
 #    include <deal.II/lac/trilinos_vector.h>
 #  endif
 #  ifdef DEAL_II_WITH_PETSC
-#    include <deal.II/lac/petsc_parallel_block_vector.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_block_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 #  endif
 #  include <deal.II/base/utilities.h>
 
index cc0c1a664695ccdb581b02084cd7ff7cf11010a3..273bf3a3555f85c927aec9979d4e301a3039da27 100644 (file)
@@ -28,8 +28,8 @@
 #    include <deal.II/lac/trilinos_vector.h>
 #  endif
 #  ifdef DEAL_II_WITH_PETSC
-#    include <deal.II/lac/petsc_parallel_block_vector.h>
-#    include <deal.II/lac/petsc_parallel_vector.h>
+#    include <deal.II/lac/petsc_block_vector.h>
+#    include <deal.II/lac/petsc_vector.h>
 #  endif
 #  include <deal.II/base/utilities.h>
 
index 73f6381bf5a7fd1a36278249a0d185dc983a893d..f66bc9835c8cbf9c8d011d5272eccd183c95ca54 100644 (file)
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/parpack_solver.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
 
index 3a708d2a215976fcf359d408782b189572521dbb..9a54c1c7ad1c8c7871b5e403ec4a5dccdf7d5fd8 100644 (file)
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/parpack_solver.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
 
index b32b45f3617df80bdf717499778d56e14887bc42..269bbc042d4cee54184aeeb67d15e87b3f3d1385 100644 (file)
 #include <deal.II/hp/fe_values.h>
 #include <deal.II/hp/q_collection.h>
 
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 
index 92b339dbaa2c8bafeda044f43cf126f0a1261092..f6d41273451cc40d98564c6fd0ce7e1fc8cb25ee 100644 (file)
 #include <deal.II/hp/fe_values.h>
 #include <deal.II/hp/q_collection.h>
 
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 
index fabccf83f4ee3a6accecab001fbca1d5c86e30bf..540c1bf7551c4f893251e3005bc91a9fb9698399 100644 (file)
@@ -21,8 +21,8 @@
 
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/generic_linear_algebra.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 6370f7414d372a218e9aec5a5c7b1a045cb528c7..600c05c04e385dada959760bfcaff6f38181fd5e 100644 (file)
 #include <deal.II/hp/q_collection.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index cde61136132091e12fa88d23b8f7b48d3e9ab830..08469d737fcfcf5c4f152456a93f640366905bc4 100644 (file)
 #include <deal.II/hp/q_collection.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 9a90be232301a3177759324b99234d803f87fc78..362a4865ab5a9183bae24d679cd89bda901df3a5 100644 (file)
 #include "../tests.h"
 
 // Vectors:
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 // Block Matrix and Vectors:
-#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 
 using namespace dealii;
index d73ee1492267669971ae0ed83e7b3bf11a365b1f..7b719de54a77afd90e32666895fa3e4d1a5dc8ec 100644 (file)
@@ -19,8 +19,8 @@
 // 3.98974 > 3.95906 > 3.90828 > 3.83792
 
 #include <deal.II/lac/petsc_compatibility.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/utilities.h>
index 352108878ad1e9e88b7465fb4d4f263eeaa2f01e..072eab7536b359ad041427ff88e45b9c08510527 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <deal.II/base/mpi.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include "../tests.h"
index 70db1b7e2e7f5a8d991da09d9d3d9853971612ad..37a2f878648c6119a3fedc91749b0ad2a56fd470 100644 (file)
@@ -17,8 +17,8 @@
 
 // check is_serial_vector type trait
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index 8b6047521ae4fa00b33b7411dd8617bf23292916..78752a8a51478dbb15a17dd210caa46430549b34 100644 (file)
@@ -45,7 +45,7 @@
 #include <deal.II/lac/filtered_matrix.h>
 #include <deal.II/lac/full_matrix.h>
 #include <deal.II/lac/identity_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparse_matrix.h>
index 532fea53f298b0e12ec068db7480c3e5b3ec0df0..8e09f05c72e40cd082bbaaed6db0afa925764fd0 100644 (file)
@@ -47,7 +47,7 @@
 #include <deal.II/lac/filtered_matrix.h>
 #include <deal.II/lac/full_matrix.h>
 #include <deal.II/lac/identity_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparse_matrix.h>
index da435b733ccd55006eed36a22e6f9d0b68cfcf87..296f3622f108d44e5c4ac7d28ddb38e95c48b89e 100644 (file)
@@ -45,7 +45,7 @@
 #include <deal.II/lac/filtered_matrix.h>
 #include <deal.II/lac/full_matrix.h>
 #include <deal.II/lac/identity_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparse_matrix.h>
index 89d9d363c380a8e58d91ed63f7624c22c1fc12c7..b7ece7f3d7719520e860e87da23346fb779bb66b 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <deal.II/base/index_set.h>
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 
 #include <iostream>
index d56d28744e2c532557565b1aa9b75c604cf35406..aad8571074911ff56b0c795443f8b30c78a3281c 100644 (file)
@@ -20,7 +20,7 @@
 
 #include <deal.II/base/index_set.h>
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_parallel_block_vector.h>
 
 #include <iostream>
index b90088b7a8418e7fbbb1e2370801ffa85032c84b..a012ef6824847c21d47389061b576b8f57b36198 100644 (file)
@@ -32,7 +32,7 @@
 #include <deal.II/grid/tria_accessor.h>
 
 #include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index 8707c12e20d2ac1d3efe3a00d151d66edeb78974..24e12c66b72d6fbdf749ded4c546fba5bd7a7f06 100644 (file)
@@ -28,7 +28,7 @@
 
 #include <deal.II/grid/grid_generator.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index 70d83806b2a65784d9b654909bb154c702e12e34..c90488ab481673fa5b3cc99db584e1805004d9e1 100644 (file)
@@ -25,8 +25,8 @@
 
 #include <deal.II/grid/grid_generator.h>
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index d6b98c6e69a5d615bce9cdaa5d43612fd5c06b00..708ffa71fc7cf72d0d2d508a39c56415422d919e 100644 (file)
@@ -34,7 +34,7 @@
 #include <deal.II/grid/grid_tools.h>
 
 #include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <deal.II/numerics/data_out.h>
 #include <deal.II/numerics/data_out_faces.h>
index 10d88078e9c22c61a3105c7c76e747adf4ac2aea..ab1d3e6a2940bf2f46badd995824af56f63db9a0 100644 (file)
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "fe_tools_extrapolate_common.h"
 
index c0fb06646513752fd7cf9b8253cc1f1e7e6db9ef..00e2b8551e2baf077b5a7f67f2ad13f2e62b2ffe 100644 (file)
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include "fe_tools_extrapolate_common.h"
 
index 3e7f1663946c35d3cc0250de873e5959184a187a..26feae884facda7ddeb2a64e488186284b8900bd 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <deal.II/base/index_set.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b49ee9e11f145cb8675d6cdbbce5b49a8136d26d..265d673ff2417f615e7719fb58f1680ae213411b 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <deal.II/base/index_set.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 8951a9ee676c6f5c640983570126df9f3acef4f5..067e9e5e34a74688e7f970140cf50c99cb999f87 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <deal.II/base/index_set.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index a53240765c3807af16eeb9ba72d8257b89e2f11b..33f9939162e54ba3b1ef057fa3fbaee3984ab5ea 100644 (file)
@@ -43,8 +43,8 @@
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/trilinos_precondition.h>
 #include <deal.II/lac/trilinos_solver.h>
index 2e4ebbc9ef494ab07bca820ff9117d2b120374b7..1f5c3954ca80182ca21b8ff30cd74f203634541d 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 32b12d9454922cc60285168698631465e39c5015..00ddd4dd4c74a99eea1a997258aad2e438d31f1b 100644 (file)
@@ -41,7 +41,7 @@
 #include <deal.II/grid/tria_accessor.h>
 #include <deal.II/grid/tria_iterator.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <deal.II/numerics/data_out.h>
 #include <deal.II/numerics/vector_tools.h>
index dceb5dc59779dc1a69851ad5d042b06dcb50470f..e6e9592dad35805e6ba2092c5f916a8fb0145fb7 100644 (file)
@@ -35,7 +35,7 @@
 #include <deal.II/grid/tria_accessor.h>
 #include <deal.II/grid/tria_iterator.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 
 #include <deal.II/numerics/vector_tools.h>
index 3d3d5ac8fc82dd1bb629dd5ba593d99bd131eda9..6d68b9d0672a5f2054c20ab0082e3e1a1f2c2cbf 100644 (file)
@@ -34,7 +34,7 @@
 #include <deal.II/grid/tria.h>
 #include <deal.II/grid/tria_accessor.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index c4a75f7f85e3d202f7bd78c5a60a883a45ba5aaf..1ab383a4d77a067137f62c957ff4be48f33e2ad2 100644 (file)
@@ -34,7 +34,7 @@
 #include <deal.II/grid/tria.h>
 #include <deal.II/grid/tria_accessor.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index 3fd41bf6bd5aaf8c5154f50497f49f06a3c16067..9780e2dfaeac54edcdf8fcb025c757309708ec4a 100644 (file)
@@ -35,7 +35,7 @@
 #include <deal.II/grid/tria.h>
 #include <deal.II/grid/tria_accessor.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index a21bf3c275e99b46ca9b84470cd22b0b189e3b76..c908edf673410c2d513d8ecb5af3de632e988b05 100644 (file)
@@ -34,7 +34,7 @@
 #include <deal.II/grid/tria.h>
 #include <deal.II/grid/tria_accessor.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index 81aa942ad4b18d1c16c4e0903572cbf4dbb3ee62..fc2df4bf55badbfaebaccff585884917b82ef846 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 86dd8706e5e15d92ffdd9fc97dde720c8ad9080a..1baf79e6a6608661a79a9a6379a58e3934e1c2c4 100644 (file)
@@ -23,7 +23,6 @@
 #include <deal.II/base/utilities.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
 
 #include "../tests.h"
index 74f27f666b1c0e422c128996a5124106d1892321..383a68433d38b029d2ee94f64c44e8f8a2b77df4 100644 (file)
@@ -22,7 +22,6 @@
 #include <deal.II/base/utilities.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
 
 #include "../tests.h"
index cda031197a33932e0564cf5e7da4626f353fa94a..9bd90ba4e62e8a10866ae90f203e93fcf2e2ba45 100644 (file)
@@ -20,8 +20,8 @@
 
 #include <deal.II/base/utilities.h>
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include "../tests.h"
index 0316ad1611df760140448ea75d4867b9b2a8d627..084fd40d441c5dbbdd052dad2545f455aaadfa37 100644 (file)
 
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_control.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 5d790792b9720b4bf7520aee3d3bc3109594e784..7bbae024843542d4e0e7d24422e9722d7dea72a2 100644 (file)
@@ -28,7 +28,7 @@
 // x_j's so that we can verify the correctness analytically
 
 #include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <sstream>
 
index cb6010a4cd9d97eb3bcec6e7be07602f24660344..3a85ae6210a89020e949bb984028afce44bf2a21 100644 (file)
@@ -22,7 +22,7 @@
 // contiguous
 
 #include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include <sstream>
 
index ae8349d2eaf2bc9d5e8c1c732b7608d8a0ac3f3a..5fd2327fb7e387c5f547139a3b54eda2e55c14dd 100644 (file)
@@ -20,7 +20,7 @@
 // like _01, but with an inhomogeneity
 
 #include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <sstream>
 
index 26a3f78dd0a7c7fe3dc72f76641348b5c69e3621..f51f773962fd1a388b0f3eb5e59c3b4baafba59c 100644 (file)
@@ -22,7 +22,7 @@
 // contiguous
 
 #include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include <sstream>
 
index 52c0d7989ccfd96360fd068c0afb51e0190fde1d..f789e2ecf49116d98460d0c609a6616d25abb0fd 100644 (file)
@@ -39,7 +39,7 @@
 #include <deal.II/grid/tria_accessor.h>
 #include <deal.II/grid/tria_iterator.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <deal.II/numerics/data_out.h>
 #include <deal.II/numerics/vector_tools.h>
index e4486a4258f977dd4af128dc834b2665da40f93a..145dc92c0459037ead16c55740e75ae013d82e4c 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 763c8501e80d1285ad8ae0edc430d190431c5d0d..7d276f508fba7055b654f83eb0413d5ab45f9547 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 3f7be85e074ad27923c4ff7c2edd2bc05a010a94..55791689c10b4afd820011fbc588817e70f14ac5 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index f2f410902799a8f1f2fae8a9fee15c9c887eb5fa..f94e6691eb8b87e629587830825a2a5047382e7e 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index db52a3ba5d9b5af40c8448541976efcced5353bf..7fc1e18fa4418089a232eb942d5c125a9480be19 100644 (file)
@@ -33,7 +33,7 @@
 #include <deal.II/grid/tria_iterator.h>
 
 #include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <deal.II/multigrid/mg_constrained_dofs.h>
index 20db5d7abe7da0d153f569d43940ab95a7e0ad32..e0431b836d637b8fcd62cf5561f8bdd0ee2cf61c 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::size()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 
index e0f9d4f3ca1ef844078542eae03f1e1381e53ee1..82627117ca3f5e1935dfb35dc786fad2bc1102c2 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator() in set-mode
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b3740eeff63af2500cdbb769096152752b6f30a9..fdb2b84c3f7ca0d656d5268f1f5dcd7657e0dcb0 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator() in add-mode
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 32c695f366281996d934dcc8331fbc4504391331..efc39d4b950c6ec567c0a5826691d1607b873f24 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::l1_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 0cdd50a54704c107ca19827c9d1c4bfe189e41cf..eddbf15b2e35efe83e93d3ff3b1ece7f0cdc0dee 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::l2_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 8357ccaf6d8c195242f2116a28f2edeed3204594..5007926fe9b16272094da238cc040566956aeaa2 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::linfty_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index ab904c01a2c7829986f3426244a760817f9cf036..1c5eeccd712dca0521aee644b485932daecefe93 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator *=
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index c278f7079231ae70155b9e12106ac1b0a4654560..3ed97ab29aeba8c0b2db54f68095cc5b423f0a1e 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator /=
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index e40e8b138297d31ca6f4cb512ff9657618368bd6..848133c7c7aa1a6f5907c329739226d004ac4afb 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
 // orthogonal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 27f9b942943033c4ba6616daa1bf7336dbdd980f..e4e8570365a51fd95b59134d985a168ff09285e0 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
 // not orthogonal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 4508c1268a2e117831f3a0c27927d854772c5216..8a08e28c0a0c64fbb535e691df21d9cdb0f37329 100644 (file)
@@ -19,7 +19,7 @@
 // this function has since been removed, so we test for v=0 instead, although
 // that may be covered by one of the other tests
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 7cc0bd38f7c0a63612e374be5d9f75eda3bdc4dc..82e0773c7cca7a8836d30dbfd3aae471140b728b 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator = (PetscScalar) with setting to a
 // nonzero value
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 72089a371dc027f49454794b6ad73c3300428c62..0be92317e93ccb402d70e559c1e7de02ed77fb93 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator = (Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 6d7097129d2d02afb58df6027bc815a504b83588..829a3e1f4234dfa1ab581123d6ced3cb85dee29c 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator = (Vector), except that we don't
 // resize the vector to be copied to beforehand
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 6089f2709dbde4d04f1833a5a57a3d162fdbd649..8b1e588e3ec35c8e7001ad6c4afed3b56e335214 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::reinit(fast)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index dd9b6ee479abc5f5a1878f0a805e32ddaa36f9da..4bf77225e6ff69804c3742fd04c84a411d4bc21a 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::reinit(!fast)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index ad0855f698c6b44b971439c509af795b2554f6f3..55150a1364acec8f058d239d59e62696fd1d228c 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::l2_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 5c28123963779ab8cbab003cfb45d37cd2b2034b..3a7be822171073ae9a39bad3aff0243caa3e735b 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::mean_value()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 8fd6ee18642be5f54d4dbd6d23d427f80de79057..0f48f143b20a9735afb49ac5ca4132bf119eed24 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::lp_norm(3)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 3677e3a62586b14481c9ad97e64a3b3a9d2dcf53..b7fa1d0522e1c1f04f1318f8416407baff6bc0f5 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::all_zero
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index a01736e4dc74e4fda6bad9ac881c16c5ab0ca04b..338568706be391c1253304cbbdba471a6692ca8f 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator+=(Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 2c93aceb116ac9358095807ce405599a44959bd5..13a437a5fe3071d8337994aa0f412894e78806e6 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator-=(Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index a4a8eafe8760c9e4b449c26be8ebe79573aac583..48470ed1acde93213fba54f65f3cc83db978e11e 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::add (scalar)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 3150d8ce8594b5f9dfedb02af8b511fd694fccf7..e71dff2d7c36432975076ff024cfc9add7354074 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::add(scalar, Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index ee3e06d91ae59b1653ec65734b12d05fbad9b6f9..f76bf5960b3afada331e01adefda4f6fdb73f161 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::add(s,V,s,V)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 2fdbbb703501caff4837890442a2d5f6e7f31ec5..58387bc7ae6dbad93b2a605239119b9d3b8b3273 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::sadd(s, Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 56aea8764d9310e017a8cde24fe3e031cddd1328..6818d293ce54d2d8a83e5c86c936b3f359a4caf5 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::sadd(scalar, scalar, Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index c22aac23b694df1f1dc2cbbd8a12e758365a60dd..1836acb31e2e6b26bec0298944d9a028ae1444d6 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::scale
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b6d1009993dca6abb895d883e2236e1032343c7b..c7e6d8f5004fc60fe621f30a197e62324850f90d 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::equ (s,V)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 5b3cddebabf586eba3928a74aef42724e221982f..45f877e494f68f4216ac54a55520e6b0fd7574cd 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::ratio
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 030afa73a234ced2ecefc7fbc0da16c1cf717d4a..fc6bd3c702acda1f8b4dc64cb043509003ee2dc3 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator = (Vector<PetscVector>)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index 1abb9087eaddb28ace1f48974dbeb57bc7abeef8..90050c76b70217904e45d196faeb8bc63c435a9b 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator = (Vector<T>) with T!=PetscScalar
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index 50073cbfaa03be2e22f1cd29f9123ee64fc7ac8b..4ce1500e9ce07eddd0f6d7e88e511192038efd87 100644 (file)
@@ -17,7 +17,7 @@
 
 // check copy constructor PETScWrappers::MPI::Vector::Vector(Vector)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 128aa132303166c36ff5e21f8e53077e240452de..df087c5f197be789cd6dc79ea6467a9814b5a083 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator() in set, and later in *= mode
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index fee054f48c7e9439b1d7c5eae33e71687c64de9d..02bde6c5a76dea1abe09e0d5457f5c012feba4c3 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator() in set, and later in /= mode
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 62784ace0fa554ef47e5e7c527dbe10c285e16b1..3f25ddfd94778ca016cec870533a3c501d43bf35 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::is_non_zero
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b467b4a1c566e5841ab4143c94d13c2d731697c6..b2279ca4903c8d82a9fb1db21a556eab09311f0e 100644 (file)
@@ -17,7 +17,7 @@
 
 // check ::Vector (const PETScWrappers::MPI::Vector &) copy constructor
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index de1871bbb50edf41abd26a977921de2e162ee95f..b6f2fa5dff233a4385e501473ce11e13dedc0d7b 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector (const ::Vector &) copy constructor
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index 8c13eff0321c10224cb443212721926db167b22a..b6ccb9469cc590f89ec334196d69082b7002492d 100644 (file)
@@ -17,7 +17,7 @@
 
 // check ::Vector::operator = (const PETScWrappers::MPI::Vector &)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index 8c6dda98229921c2bd3a675a793524631055e407..4046f6a49f10ef489953dbcd06d2d3f32bb6e766 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator = (const ::Vector &)
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index 0fafe13e7d2a50b8ff8f9403c3ae5f66d8c29d11..bcff03544b4687599aa85dfe8915a4e234339deb 100644 (file)
@@ -20,7 +20,6 @@
 // PETScWrappers::MatrixBase::operator=
 
 
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
 #include <deal.II/lac/vector.h>
 
index 00c088cdb8ce5a3678d83539dde5f4e17ca2fa36..fdeb1a4b5e92a56f28c5892e24deaa0378c5da41 100644 (file)
@@ -18,7 +18,7 @@
 // This test used to fail after upgrading to petsc 2.2.1
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index ba6db797be17f8fe7667bfba0f6b64ad5203e88b..2cbe3bda3aacdbb0d6fc884ab588ea7fbe58b7c7 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PetscScalar
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../tests.h"
 
index 0fd3f884ba0b75896952550531bbad513d1a2c75..0a1afe2c5a2a1b34c8eb036966df9fe86050a627 100644 (file)
@@ -17,7 +17,7 @@
 
 // make sure that block vector iterator allows reading and writing correctly
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include <iostream>
 
index 04e10315a4103194bc434fc874eace1ec38c4a6d..6daecb23ddb3dcc353c70df4f12a5478e30a7737 100644 (file)
@@ -17,7 +17,7 @@
 
 // like _01, except that we use operator[] instead of operator*
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include <iostream>
 
index 46d0d80b60b0ec3d46e1286643217b1c5b474ebd..c1ffd2df3342459a432f9985b7ee6733c59b4c4b 100644 (file)
@@ -17,7 +17,7 @@
 // this test is an adaptation of lac/block_vector_iterator for PETSc block
 // vectors
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include <algorithm>
 #include <iostream>
index 71f9dc8b40ddcfd13761aedfc320be9b5ce75f92..33a52b5c4835211e69f9fe7cd03232d033627727 100644 (file)
@@ -19,7 +19,7 @@
 #include <deal.II/base/index_set.h>
 #include <deal.II/base/mpi.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include "../tests.h"
index c44000a61d78448897395219a2ecc0e0b7effffa..dee7f516de29e55704e22fa17caccb651149b4de 100644 (file)
@@ -21,7 +21,7 @@
 #include <deal.II/base/index_set.h>
 
 #include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index cb154d4af82576cf0c28a10600430f75fa9f632c..a079938badce2da7ff1acbc85153dd3c0c75055a 100644 (file)
@@ -22,8 +22,8 @@
 
 #include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index d11ff12d7678b4f73f15fa351316fb89f684618d..7300bd2fc9741f6ed7f48945727051a10b40c5df 100644 (file)
@@ -17,8 +17,8 @@
 // test the CG solver using the PETSc matrix and vector classes
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_bicgstab.h>
 #include <deal.II/lac/solver_cg.h>
index a1e9eebf800b982f06768817b46c91ec69215f15..a47da420be8bd9bcefe981f532cd9ba8c8772a30 100644 (file)
@@ -18,8 +18,8 @@
 
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver.h>
 #include <deal.II/lac/solver_bicgstab.h>
index 421eb903764a8d36868e665027fc87ce7e8c31d5..694064beb66ce87240113362534149b7cef96df1 100644 (file)
@@ -18,8 +18,8 @@
 
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver.h>
 #include <deal.II/lac/solver_bicgstab.h>
index f8bc4bfccb7752d67012b2d447a9acec2d310d58..08f8aef33cff10c09df6f28967f901465f72e905 100644 (file)
@@ -17,8 +17,8 @@
 // test the MINRES solver using the PETSc matrix and vector classes
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_bicgstab.h>
 #include <deal.II/lac/solver_cg.h>
index 2c8e74fdc06b3149a60ffd29c51f7864e98ab19f..bd62396ed7e59db496a092ebe8d7513db576e8fb 100644 (file)
@@ -17,8 +17,8 @@
 // test the QMRS solver using the PETSc matrix and vector classes
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_bicgstab.h>
 #include <deal.II/lac/solver_cg.h>
index 99ba6650c06ed0c90619d7dea234f6cf3a96959a..a797d27e77c4e917042696cc9202f3a06968f7f3 100644 (file)
 #include <deal.II/grid/grid_generator.h>
 #include <deal.II/grid/tria.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/sparsity_pattern.h>
 
index 471ee105a478e8631d05df37277c5674cdbda011..d21f32ae3b488c2a9c56fc19bd0eefb18dede2f2 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::vmult
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 9b3199d64a4e0a48ca92a511a9974ed3a8c37481..db8282c1fd5b0b250e2958c52c93d3741d029354 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::Tvmult
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index e3296ed3a0734853207f283e584ab3dbdbc3171d..01bc509f4db5f22060fca8ebc80f6c7c924b68d2 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::vmult_add
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index d58e78c5dcb9b2267ef9784961202dd98ced37bf..a2f831c15387cc2b802782ab25cb4fa3801cde28 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::Tvmult_add
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 3df6b22534e83b7be84498c6d9a36bddd083715e..416714549c2e781c69332245195c9fdd5eb667c5 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::matrix_scalar_product
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index cc885f5e6c4bba00aff5f228a35a8107a0c12a9f..a7d87e31c59f6e625289bab6f3644dd889a01e1a 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::matrix_norm_square
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 7640ebb6a83cf5ce1c33081ad8029ff4e2d33006..26f246a05873fbd87973e386bc6d95ee7cace1f7 100644 (file)
@@ -18,7 +18,7 @@
 // check FullMatrix::matrix_norm_square
 
 #include <deal.II/lac/petsc_full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b92b8821bfdbf1225b2ffb84cc0c2e82749cf77b..2698e8234ac641515db33f7d4505c9c1d312e6f0 100644 (file)
@@ -26,8 +26,8 @@
 #include <deal.II/base/index_set.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 #include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
 
 #include <iostream>
 #include <vector>
index 3b704e8ae78de3421902acb180de79da54765a42..ac0f7a4af544c009fa97aeddf3d6ba5ad182e281 100644 (file)
@@ -27,7 +27,7 @@
 // malloc calls have been performed
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
 
 #include "../tests.h"
 
index abbc5c27ef252818becc3cb1fdfe4698b4e9016f..eab44248ea0ac67fe9eb07d82caefbb8fdfb3a80 100644 (file)
@@ -19,8 +19,8 @@
 #include <deal.II/base/index_set.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 #include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
 
 #include <iostream>
 #include <vector>
index f1bd19418f749682f34d5ac5a766362d792c0e1b..7fd306e2390f565f1da41af7978a9a6a3857df3d 100644 (file)
@@ -20,9 +20,9 @@
 #include <deal.II/base/index_set.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 7c5b62a904af82afafc21e046acf231a386f80ec..2f21b646a3dd15422ee922b6831f9f200b237904 100644 (file)
@@ -22,8 +22,8 @@
 //
 // the tests build the 5-point stencil matrix for a uniform grid of size N*N
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 
 #include <iostream>
index cb2e0b8626bcf4496076ac25528147ec15b93003..28fef25dff0dd4b97dbe978d7648548bccf12e48 100644 (file)
@@ -22,8 +22,8 @@
 //
 // the tests build the 5-point stencil matrix for a uniform grid of size N*N
 
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 
 #include <iostream>
index c4aa715406848b9712f8265b68b5af7bdefd47eb..631a1e5c3227866aacdfa714b4db63d86437d430 100644 (file)
@@ -28,8 +28,8 @@
 // matrix in a consecutive fashion, but rather according to the order of
 // degrees of freedom in the sequence of cells that we traverse
 
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 
 #include <iostream>
index 640e410b070b18f7210c1f4009109480030e7bb1..d8d4ca1d587ca57aa4082e3d31a077566a0b9728 100644 (file)
 
 // test the PETSc Richardson solver
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../testmatrix.h"
 #include "../tests.h"
index 4fab023c4ffbb337a0f8142a86b18034e5b77a34..a8a60f246ea8a91fe834caf63c505dd61fa52c53 100644 (file)
 
 // test the PETSc Chebychev solver
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../testmatrix.h"
 #include "../tests.h"
index 73b4b981e5bab9972e23c1e347e4fb994ced62a3..39b4d6725e8d7739a01f1cda6a15d21d65c53c20 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 4951426d53389f79dab70c6c668dfc927b8db2ad..8fd0fa7b1fafa6d8b7b21e931d952822d61f5f20 100644 (file)
 // test the PETSc CG solver with PETSc MatrixFree class
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 85052b823652a95ba906ada2dc206ff5f2eba452..c8e4379eca289a3b7b7541b901b7f0d0e432b65c 100644 (file)
 // preconditioner
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 729d689df253234d74fdb01787e98141ff606e41..daa67a74c9da8a85de3103d10b7a3ab25b153b66 100644 (file)
 // matrix
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index ef474c23a7c6192ad56dbf8cb01cf002bf775004..318ab358ec5567fd6738a3c65eb903b88c5dd3c6 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index bdfa8d9f2dbc005457b43ffdedd5d28642093531..79d415c37cc9dbfdfb308eb9c2fb00baf169d3dc 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 29b19068e10045361b87fb5ef0679ad97b454431..2e1b70f2966d8b53c19298ff241d59d0820cef75 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 689295dce898edfda3c97e3ed4578a79a0b0d0c8..7208b021fc87e9fe4b871720534cf52b691d3856 100644 (file)
 // preconditioner. This should converge in exactly one iteration
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 770957c885738842f16336f7c5555fdb0f1cd910..4ee6b6bd08fbcac7557fc618f09c9168ed77b049 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 3f0b76ba9785c60a91ca66e9cc822e4465725e8a..1c10a5670b59fec9b8297a89e1f519e30205cb36 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 4b944c1dc6da61327a48a34dd983072352b579c1..155981cb157ef66a1e33a43e707bb3e1a54c8c5c 100644 (file)
 // test the PETSc CG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index cd31ad5f57b04e6118ad3e4100338f382830ff37..5b9b7888059d4cd760be54242ccc97bc86f8cc21 100644 (file)
 // test the PETSc BiCG solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 9dbabcba581856433974e9dafaf8e953c4a61fc5..5b3cd115f9823f1076ea173cdf27b8b5a0450622 100644 (file)
 // test the PETSc GMRES solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 429277918db94dd7a4df9f7d2346189ed5ea7f96..e779bb979456fc8b2c7415520600abee39f5e08d 100644 (file)
 // test the PETSc Bicgstab solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 0fa058f58dc70411481d564a81ad288687d47c8d..4a113c4382138dff5814ea2eb3f3ce411fe14703 100644 (file)
 // test the PETSc CGS solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 96ef8d722c013acb987939b3fa62670512c82c38..c39c142b3a69eeba10ef064a102eff9c7bad4fd1 100644 (file)
 // test the PETSc TFQMR solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 59dc9bbaaee69fa22ff596066b490f18a3a9994a..581b3c52375dbcbce7d457f318586716ab7fc1ac 100644 (file)
 
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 4857fefed5b24089d7e37b298486dd2c5aa4b849..13aeac5b15b2bf23451aa611c0d2a6c70c23547d 100644 (file)
 // test the PETSc CR solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index b0dda2e417e32f9d86e4e7a6cdaceebb145fc439..c8b548caa27dbf08db5db384e51d301cdfec8e84 100644 (file)
 
 // test the PETSc LSQR solver
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../testmatrix.h"
 #include "../tests.h"
index 61057b3fd0f85c7b4a71e8b12327f3856768057b..17dfa5bf6d014260df20ff000c56fcabd31bbc35 100644 (file)
 // test the PETSc PreOnly solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 96cda6f04ed9ec5ca1e5a684998e87b28d841031..8eef95e116c0784bef175369fffbcf68be0ee390 100644 (file)
 // test the PETScWrapper::Precondition*::vmult
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index f6169213d8a42b86864f6775766e11e8168579f1..9179eff4975455687e7d1c2435a4da93a33066b9 100644 (file)
 // test the PETSc SparseDirectMumps solver
 
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index f2b74a0e6667a14bb81bb2302151db6a3e545d29..bf12fd014769f8cdf153fafed451f137f6b447e5 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::mmult
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index dcb10f244209677dd87687802f81b49debfcb017..67d3caba8b89039ab817908a1b9ca9a4b408a11a 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::Tmmult
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index a9568a7bad0c2d36690c9dec45c8c62180d46181..c9cc052ae9b810ee41bfd7f4ed856812034b7147 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::mmult
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index cfb1bf3d94800f883a914ef2ca512caf259ac201..277f31e83e0ed111a1af86fcf8da35188d64917d 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::Tmmult
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index f1bfc0963b462678f1585f45c4433e12d010f20e..bc4c875094f42ea0a56ebd83a7215a2bc5f6a164 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::vmult
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index cd0aa6c6e6626070e0052bdff38d58d36f1f30c9..4b5d8a8da32d292443e36f23826a5f6a51cce975 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::Tvmult
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 6515cd28e5004ab780d51ed2620d488890b83029..200942799e681993a9813ec2b3f752367ccc877d 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::vmult_add
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 465c1427aa1bf52f670602175553a6ec84c5099b..48b9091cb3b99fff4796d2e4e6232f934298e94a 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::Tvmult_add
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index bd49c98c130e6989bd072cc38975a959a1887f76..cd30d937ce0d1eca3dcee346e7777130fe64136e 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::matrix_scalar_product
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index e0b85d5aed18966de3bf45aaf44b7b56ca33093e..7deea00e071c81768cb2abd0b6658a5d4dace24b 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::matrix_norm_square
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 8a8e554ad3144e3b0622616c0227ab0762167510..eac5803993d787b7182274b5c424ce5f919e86a4 100644 (file)
@@ -17,8 +17,8 @@
 
 // check SparseMatrix::matrix_norm_square
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 85cafac40ccec0cdac7cf588cc350c02a3f8a68b..21f4bee5b0cc35484a71d88f5b8eacc66d4445e6 100644 (file)
@@ -17,8 +17,8 @@
 
 // check VectorTools::subtract_mean_value() for PETSc vectors
 
-#include <deal.II/lac/petsc_parallel_block_vector.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <deal.II/numerics/vector_tools.h>
 
index 2276f504fac02395e670b440905ff58014bf867d..1103acc87993d84e8f6ed74adf963c4e14c0c43f 100644 (file)
@@ -12,7 +12,7 @@
 // the top level directory of deal.II.
 //
 // ---------------------------------------------------------------------
-#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
 
 #include "../tests.h"
 
index 65f57f68c49529de3181e6532d16487c6aed67a2..ec7d2482c1b9251d633d3c3b63b692954aba0202 100644 (file)
@@ -23,7 +23,7 @@
 //
 // this was fixed 2004-04-05, and this test checks that it works
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index f53f68e1178b825b60c8899512d91e3205c9d4e4..20b0cc9f8ce00a1fed28a2afb9e6d0292e0843d4 100644 (file)
@@ -22,7 +22,7 @@
 // argument to the user-defined operator+=. This is not exciting, but since I
 // wrote the test to make sure it works this way, let's keep it then...
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index c6123add50adae91a26efe9298d87e8b5188fa0c..abc7932ea1578d32441ef7874581b3fa7dcf0dfb 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
 // for vectors that are not equal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 82f4088d4c7d5958ffa606f334ba53c65a8ef397..da1e8f568a889c8d576dedbade007d5fd87b48a5 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
 // for vectors that are equal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 933894706ac7ad811f585a833287df5d1bac7dbe..22018deac5b7f8579f7f09ad23f6ad89850d24a5 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator!=(PETScWrappers::MPI::Vector)
 // for vectors that are not equal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index fc02aacaf1523f656843d860f3f5a7d75c19f652..3ddb59e231d694cb441f669d58eb5ea5e72a0a74 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator!=(PETScWrappers::MPI::Vector)
 // for vectors that are equal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index f49cfff25b7bbbd79be0c13b503867b7fe032b3a..23c89ef545b11b080031ceea99d075a2dc296080 100644 (file)
@@ -18,7 +18,7 @@
 // verify that VectorBase::print uses the precision parameter correctly and
 // restores the previous value of the stream precision
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 8054a97bde2466f17f48af02fcd7f46a2abf268a..e3d92e28867dd653189fadc5f2af0505e6fa5d14 100644 (file)
@@ -18,7 +18,7 @@
 // Test the constructor PETScWrappers::VectorBase(const Vec &) that takes an
 // existing PETSc vector.
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 32348fd238c644cb49e492d2a442901426bb1e4b..20a4b99a020468aeabf75bd7df6136c229cabc72 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::size()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 
index 42fd257896b8116e349cd2d457424bb31a546b7b..5389891c2a50e3e587ed5a3c245ecd3af4efcb9c 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator() in set-mode
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index a32035b20674e2a8da7f05b51e9087eeface0d73..0565b06764e8e4c467236eba3c54d01f372cbdd9 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::operator() in add-mode
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 1fa076a422253e24b2d1e20666a8aa52f164028c..cfc75db82358a8e8a09de4d54b443ede08901837 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::l1_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b416e9dcba836ff46761eca78140249ced1605b0..5f258322bc61467edeb1f1bb7735282ca873cb6f 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::l2_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index ff6b49ed510dcea4a85ac683251781941375ed1e..1e88fe1c64e6805198b3a8b537c1e67d4812c78d 100644 (file)
@@ -17,7 +17,7 @@
 
 // check PETScWrappers::MPI::Vector::linfty_norm()
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b9c0b50c8681710fe1fc634697277c4e213c4a49..607a1bc97bfb45144e1ad20b44579445f2478840 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
 // not orthogonal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index ed017c0141ddf6fc55cc2613ed04199553bc763d..a94ca21541129ecaa2ed01ac97e7f62ecdfb0306 100644 (file)
@@ -20,9 +20,8 @@
 
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <numeric>
index aa7097bdd32330d0242a8daa4013eb1566cb0bdb..f14301187574ec3e5638fbc5ba7e42c45ad6c713 100644 (file)
@@ -15,7 +15,7 @@
 
 
 // deal.II includes
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <cassert>
 #include <complex>
index a85328170a2b0999cb5eb626124b2e2edf383a3c..583d8dc2ffaed721a10083029e8ea0215a7ff2f6 100644 (file)
@@ -42,8 +42,8 @@
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/full_matrix.h>
 #include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include "../tests.h"
index cc55665e0fee024a91203b17cb357af1b9d19ee9..ef57ac9d8d2a574810d98ebe494dbac9b8940e81 100644 (file)
@@ -42,8 +42,8 @@
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
 
index 87bd6f0794b30722850d8e545a08783406db9238..3a507cb5a80b8cfbcea7cf40c5c5f9997906c4ef 100644 (file)
 
 // Note: This is (almost) a clone of the tests/petsc/solver_01.cc
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../testmatrix.h"
 #include "../tests.h"
index a68d9ace477d56fec7b1681831543129c4745885..fc598c3f69066f99aa312a1aab7cb338ea64a86f 100644 (file)
 
 // Note: This is (almost) a clone of the tests/petsc/solver_02.cc
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include "../testmatrix.h"
 #include "../tests.h"
index f5b8063d12b2eef011f86c09e98045c245ffadb9..12d8a854b25c32c7b924b123883462c73ee9b9f4 100644 (file)
 
 // Note: This is (almost) a clone of the tests/petsc/solver_03.cc
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index ca9bb033bfdd10a60260317ab21944bc019959fb..56be64ba86b20a4f2035c71b3484e0376f66ae5a 100644 (file)
 // numbers to a possibly
 // complex matrix where
 // petsc-scalar=complex.
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 0a8194e48d43b5b876157ba06222a5f1608f6236..9a97f066ef43f82bb5dac1dbadcbcba205aa2aa1 100644 (file)
 
 // Note: This is (almost) a clone of the tests/petsc/solver_03.cc
 
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector_memory.h>
 
 #include <iostream>
index 9bd64b7f585b58c31afbc10628a9b22777858061..d330b6deb0ab9376f1d5a4352471dc6a0b57e222 100644 (file)
@@ -17,7 +17,7 @@
 
 // check assignment of elements in Vector
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/vector.h>
 
 #include <iostream>
index c06bb498acb319bdddc59be974c42a335fa6f66a..5a9be5a4ff4a55129aa17aec6eae0559a3ec5e5f 100644 (file)
@@ -16,7 +16,7 @@
 
 
 // See notes in petsc/vector_assign_01.cc
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index d61d9fe50b0379eca944d8604613a4fbd8b02050..e3fc46dd4a89d36c517cb261f70cb7cf69ff8464 100644 (file)
@@ -18,7 +18,7 @@
 // this is equivalent to the petsc_parallel_vector_assign_01 test, except that
 // we use operator+= instead of operator=. This is also not exciting...
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 3f0db1a7245754e1f04c714419fd824d64da3fe3..174c391522c0a616637d973212d45947466f302f 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
 // for vectors that are not equal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 5cb96e5d72aa2dbea9ef3405bb497be7d89c5dbe..db6bc45f2fc696b3f04aa3324b717633496c11fa 100644 (file)
@@ -18,7 +18,7 @@
 // check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
 // for vectors that are equal
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index 2b0a7782b6c3de1581dc9c8c3ec73165488b9801..1e87b63a83ff162c8bfab0f62d2d0fa039e02918 100644 (file)
@@ -18,7 +18,7 @@
 // verify that VectorBase::print uses the precision parameter correctly and
 // restores the previous value of the stream precision
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index b2d3fee1a1adab99c05ce28f5d4b7112c42d98a6..e602ba52c56b05250a223302a2ddfd8b4fab5918 100644 (file)
@@ -18,7 +18,7 @@
 // Test the constructor PETScWrappers::VectorBase(const Vec &) that takes an
 // existing PETSc vector for complex values.
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <iostream>
 #include <vector>
index e4d458b0f963de7e2438be5a9c02a60d772a6eb3..b3c5617a8b36babfdd6a3fc36b42bdafb9e3a4d1 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
 
index 91fa6fd4434cc274d65907dc37458c2a70b0e540..8af2d09427ae9e4507daa40489b6073d6aface13 100644 (file)
 #include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
 
index b9a2f90ef84dad7320585e7e4f028504823d61ca..45a27b7279ea9d589dd5b869641762879eddd560 100644 (file)
 #include <deal.II/grid/tria_iterator.h>
 
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <deal.II/numerics/data_out.h>
 #include <deal.II/numerics/matrix_tools.h>
index e07fc5fb91440984a03429c436bc5b8407fb3cf7..70e1a55bc9929bfc2319f681fe91e29cfdf8d175 100644 (file)
@@ -32,8 +32,8 @@
 #include <deal.II/grid/tria_iterator.h>
 
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 
 #include <deal.II/numerics/data_out.h>
index 6f95002b880ab2a85f363a85b0dd1c7cc59a3185..666bd208d1850307aea08aa19efadaee65a7ca4d 100644 (file)
 
 
 #include <deal.II/lac/petsc_compatibility.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/vector_memory.h>
 
index 07074ee5e34b5bf1d3234c4ea220747c56509bc7..233ec8f32f6a6b8934d6b9537e42bc018092de7f 100644 (file)
 
 
 #include <deal.II/lac/petsc_compatibility.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/vector_memory.h>
 
index d23651c339cb4d384a89876c15301dc62f31be9c..520e6eb0c76d8647541c37e841d6d2e16f1ad21a 100644 (file)
 #include <deal.II/grid/grid_tools.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index ea58c35b165bff2b8f30cd8b998a535dd21e480f..b58873ff8a64584b8e70c9ae20a11c7c63f8d586 100644 (file)
 #include <deal.II/grid/grid_tools.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 394a66acdc13bfe6fb8b8d790fedb2968c9e0d47..cdb2068806d84fd210e7b71699350162996d2811 100644 (file)
 #include <deal.II/grid/grid_tools.h>
 
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/petsc_precondition.h>
 #include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/slepc_solver.h>
 #include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/lac/vector.h>
index 3f9736457e782e5e75852264eda769d02d64e1ec..289ebacabc0d7261c88ce1e721ad90e5a5f19f7f 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <deal.II/base/mpi.h>
 
-#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 
 #include <deal.II/sundials/copy.h>
 

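For downstream code, the net effect of this commit is purely an include-path change: the wrapper classes keep their names and namespaces. Below is a minimal sketch (not part of this commit) of user code picking up the renamed headers, assuming a PETSc-enabled deal.II build; the 10-element vector and its ownership layout are made up for illustration only.

    // was: #include <deal.II/lac/petsc_parallel_sparse_matrix.h>
    //      #include <deal.II/lac/petsc_parallel_vector.h>
    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/petsc_sparse_matrix.h>
    #include <deal.II/lac/petsc_vector.h>

    int main(int argc, char **argv)
    {
      // Initialize MPI (and thereby PETSc) before creating any wrapper objects.
      dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      // The classes themselves are unchanged: still PETScWrappers::MPI::*.
      dealii::IndexSet locally_owned(10);
      locally_owned.add_range(0, 10);
      dealii::PETScWrappers::MPI::Vector v(locally_owned, MPI_COMM_WORLD);
      v = 1.0; // set all locally owned entries to one

      return 0;
    }

Only the #include lines change when moving to the renamed headers; every class, constructor, and operator used above is the existing deal.II PETSc wrapper API.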