From 6a1d83c2e1ab7b959ccaf0b138b348dcd3086228 Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Thu, 26 Jul 2018 14:18:24 +0200 Subject: [PATCH] Rename petsc_parallel_* headers to petsc_* --- .../fe/fe_tools_extrapolate.templates.h | 4 +- .../fe/fe_tools_interpolate.templates.h | 4 +- .../lac/affine_constraints.templates.h | 7 +- include/deal.II/lac/generic_linear_algebra.h | 4 +- .../lac/la_parallel_block_vector.templates.h | 2 +- .../lac/la_parallel_vector.templates.h | 2 +- .../deal.II/lac/petsc_block_sparse_matrix.h | 361 +++++++++++ include/deal.II/lac/petsc_block_vector.h | 564 +++++++++++++++++ include/deal.II/lac/petsc_matrix_free.h | 2 +- .../lac/petsc_parallel_block_sparse_matrix.h | 340 +---------- .../deal.II/lac/petsc_parallel_block_vector.h | 543 +---------------- .../lac/petsc_parallel_sparse_matrix.h | 511 +--------------- include/deal.II/lac/petsc_parallel_vector.h | 552 +---------------- include/deal.II/lac/petsc_sparse_matrix.h | 489 ++++++++++++++- include/deal.II/lac/petsc_vector.h | 573 ++++++++++++++++++ .../deal.II/lac/read_write_vector.templates.h | 2 +- include/deal.II/multigrid/mg_transfer.h | 4 +- .../deal.II/multigrid/mg_transfer.templates.h | 2 +- .../numerics/error_estimator.templates.h | 4 +- .../numerics/matrix_creator.templates.h | 5 +- .../deal.II/numerics/vector_tools.templates.h | 4 +- include/deal.II/sundials/arkode.h | 4 +- include/deal.II/sundials/copy.h | 4 +- include/deal.II/sundials/ida.h | 4 +- source/algorithms/operator.cc | 4 +- source/base/mpi.cc | 4 +- source/base/time_stepping.cc | 4 +- source/distributed/solution_transfer.cc | 4 +- source/dofs/dof_accessor_get.cc | 4 +- source/dofs/dof_accessor_set.cc | 4 +- source/fe/fe_values.cc | 4 +- source/fe/mapping_fe_field.cc | 4 +- source/fe/mapping_q1_eulerian.cc | 4 +- source/fe/mapping_q_eulerian.cc | 4 +- source/lac/block_matrix_array.cc | 2 +- source/lac/petsc_matrix_base.cc | 1 - .../lac/petsc_parallel_block_sparse_matrix.cc | 2 +- source/lac/petsc_parallel_block_vector.cc | 2 +- source/lac/petsc_parallel_sparse_matrix.cc | 4 +- source/lac/petsc_parallel_vector.cc | 2 +- source/lac/petsc_vector_base.cc | 2 +- source/lac/solver.cc | 4 +- source/lac/vector_memory.cc | 4 +- .../meshworker/mesh_worker_vector_selector.cc | 4 +- source/multigrid/mg_base.cc | 2 +- source/multigrid/multigrid.cc | 4 +- source/non_matching/coupling.cc | 3 +- source/numerics/data_out_dof_data.cc | 4 +- source/numerics/data_out_dof_data_codim.cc | 4 +- source/numerics/data_out_dof_data_inst2.cc | 4 +- source/numerics/derivative_approximation.cc | 4 +- source/numerics/dof_output_operator.cc | 4 +- source/numerics/error_estimator_1d.cc | 4 +- source/numerics/fe_field_function.cc | 4 +- source/numerics/matrix_tools.cc | 5 +- source/numerics/matrix_tools_once.cc | 4 +- source/numerics/point_value_history.cc | 4 +- source/numerics/solution_transfer.cc | 4 +- source/sundials/arkode.cc | 4 +- source/sundials/ida.cc | 4 +- source/sundials/kinsol.cc | 4 +- .../parpack_advection_diffusion_petsc.cc | 4 +- tests/arpack/step-36_parpack.cc | 4 +- tests/fe/fe_enriched_step-36.cc | 4 +- tests/fe/fe_enriched_step-36b.cc | 4 +- tests/gla/extract_subvector_to.cc | 4 +- .../hp/hp_constraints_neither_dominate_01.cc | 4 +- .../hp/hp_constraints_neither_dominate_02.cc | 4 +- tests/lac/linear_operator_09.cc | 7 +- tests/lac/utilities_01.cc | 2 +- tests/lac/vector_reinit_03.cc | 2 +- tests/lac/vector_type_traits_is_serial_03.cc | 4 +- tests/mappings/mapping_q_eulerian_07.cc | 2 +- tests/mappings/mapping_q_eulerian_08.cc | 2 +- 
tests/matrix_free/interpolate_to_mg.cc | 2 +- tests/mpi/blockvec_01.cc | 2 +- tests/mpi/blockvec_02.cc | 2 +- tests/mpi/condense_01.cc | 2 +- tests/mpi/constraint_matrix_condense_01.cc | 2 +- tests/mpi/constraint_matrix_set_zero_01.cc | 4 +- tests/mpi/data_out_faces_01.cc | 2 +- tests/mpi/fe_tools_extrapolate_02.cc | 2 +- tests/mpi/fe_tools_extrapolate_05.cc | 2 +- tests/mpi/ghost_01.cc | 2 +- tests/mpi/ghost_02.cc | 2 +- tests/mpi/ghost_03.cc | 2 +- tests/mpi/has_hanging_nodes.cc | 4 +- tests/mpi/hp_step-40.cc | 4 +- tests/mpi/interpolate_02.cc | 2 +- tests/mpi/interpolate_04.cc | 2 +- tests/mpi/p4est_save_01.cc | 2 +- tests/mpi/p4est_save_02.cc | 2 +- tests/mpi/p4est_save_03.cc | 2 +- tests/mpi/p4est_save_04.cc | 2 +- tests/mpi/periodicity_01.cc | 4 +- tests/mpi/petsc_01.cc | 1 - tests/mpi/petsc_02.cc | 1 - tests/mpi/petsc_03.cc | 4 +- tests/mpi/petsc_bug_ghost_vector_01.cc | 4 +- tests/mpi/petsc_distribute_01.cc | 2 +- tests/mpi/petsc_distribute_01_block.cc | 2 +- tests/mpi/petsc_distribute_01_inhomogenous.cc | 2 +- tests/mpi/petsc_locally_owned_elements.cc | 2 +- tests/mpi/solution_transfer_01.cc | 2 +- tests/mpi/step-40.cc | 4 +- tests/mpi/step-40_cuthill_mckee.cc | 4 +- tests/mpi/step-40_cuthill_mckee_MPI-subset.cc | 4 +- tests/mpi/step-40_direct_solver.cc | 4 +- tests/multigrid/transfer_04b.cc | 2 +- tests/petsc/11.cc | 2 +- tests/petsc/12.cc | 2 +- tests/petsc/13.cc | 2 +- tests/petsc/17.cc | 2 +- tests/petsc/18.cc | 2 +- tests/petsc/19.cc | 2 +- tests/petsc/20.cc | 2 +- tests/petsc/21.cc | 2 +- tests/petsc/22.cc | 2 +- tests/petsc/23.cc | 2 +- tests/petsc/24.cc | 2 +- tests/petsc/26.cc | 2 +- tests/petsc/27.cc | 2 +- tests/petsc/28.cc | 2 +- tests/petsc/29.cc | 2 +- tests/petsc/30.cc | 2 +- tests/petsc/31.cc | 2 +- tests/petsc/32.cc | 2 +- tests/petsc/33.cc | 2 +- tests/petsc/34.cc | 2 +- tests/petsc/35.cc | 2 +- tests/petsc/36.cc | 2 +- tests/petsc/37.cc | 2 +- tests/petsc/39.cc | 2 +- tests/petsc/40.cc | 2 +- tests/petsc/41.cc | 2 +- tests/petsc/42.cc | 2 +- tests/petsc/45.cc | 2 +- tests/petsc/46.cc | 2 +- tests/petsc/48.cc | 2 +- tests/petsc/49.cc | 2 +- tests/petsc/50.cc | 2 +- tests/petsc/51.cc | 2 +- tests/petsc/55.cc | 2 +- tests/petsc/56.cc | 2 +- tests/petsc/57.cc | 2 +- tests/petsc/58.cc | 2 +- tests/petsc/59.cc | 2 +- tests/petsc/60.cc | 2 +- tests/petsc/61.cc | 2 +- tests/petsc/64.cc | 1 - tests/petsc/65.cc | 2 +- tests/petsc/70.cc | 2 +- tests/petsc/block_vector_iterator_01.cc | 2 +- tests/petsc/block_vector_iterator_02.cc | 2 +- tests/petsc/block_vector_iterator_03.cc | 2 +- tests/petsc/copy_parallel_vector.cc | 2 +- tests/petsc/copy_to_dealvec.cc | 2 +- tests/petsc/copy_to_dealvec_block.cc | 4 +- tests/petsc/deal_solver_01.cc | 2 +- tests/petsc/deal_solver_02.cc | 2 +- tests/petsc/deal_solver_03.cc | 2 +- tests/petsc/deal_solver_04.cc | 2 +- tests/petsc/deal_solver_05.cc | 2 +- .../petsc/different_matrix_preconditioner.cc | 2 +- tests/petsc/full_matrix_vector_01.cc | 2 +- tests/petsc/full_matrix_vector_02.cc | 2 +- tests/petsc/full_matrix_vector_03.cc | 2 +- tests/petsc/full_matrix_vector_04.cc | 2 +- tests/petsc/full_matrix_vector_05.cc | 2 +- tests/petsc/full_matrix_vector_06.cc | 2 +- tests/petsc/full_matrix_vector_07.cc | 2 +- tests/petsc/iterate_parallel_01.cc | 2 +- tests/petsc/parallel_sparse_matrix_01.cc | 2 +- tests/petsc/reinit_preconditioner_01.cc | 2 +- tests/petsc/reinit_preconditioner_02.cc | 2 +- tests/petsc/slowness_02.cc | 2 +- tests/petsc/slowness_03.cc | 4 +- tests/petsc/slowness_04.cc | 4 +- tests/petsc/solver_01.cc | 2 +- 
tests/petsc/solver_02.cc | 2 +- tests/petsc/solver_03.cc | 2 +- tests/petsc/solver_03_mf.cc | 2 +- .../petsc/solver_03_precondition_boomeramg.cc | 2 +- ...ver_03_precondition_boomeramg_symmetric.cc | 2 +- .../petsc/solver_03_precondition_eisenstat.cc | 2 +- tests/petsc/solver_03_precondition_icc.cc | 2 +- tests/petsc/solver_03_precondition_ilu.cc | 2 +- tests/petsc/solver_03_precondition_lu.cc | 2 +- .../petsc/solver_03_precondition_parasails.cc | 2 +- tests/petsc/solver_03_precondition_sor.cc | 2 +- tests/petsc/solver_03_precondition_ssor.cc | 2 +- tests/petsc/solver_04.cc | 2 +- tests/petsc/solver_05.cc | 2 +- tests/petsc/solver_06.cc | 2 +- tests/petsc/solver_07.cc | 2 +- tests/petsc/solver_08.cc | 2 +- tests/petsc/solver_09.cc | 2 +- tests/petsc/solver_10.cc | 2 +- tests/petsc/solver_11.cc | 2 +- tests/petsc/solver_12.cc | 2 +- tests/petsc/solver_13.cc | 2 +- tests/petsc/sparse_direct_mumps.cc | 2 +- tests/petsc/sparse_matrix_matrix_01.cc | 2 +- tests/petsc/sparse_matrix_matrix_02.cc | 2 +- tests/petsc/sparse_matrix_matrix_03.cc | 2 +- tests/petsc/sparse_matrix_matrix_04.cc | 2 +- tests/petsc/sparse_matrix_vector_01.cc | 2 +- tests/petsc/sparse_matrix_vector_02.cc | 2 +- tests/petsc/sparse_matrix_vector_03.cc | 2 +- tests/petsc/sparse_matrix_vector_04.cc | 2 +- tests/petsc/sparse_matrix_vector_05.cc | 2 +- tests/petsc/sparse_matrix_vector_06.cc | 2 +- tests/petsc/sparse_matrix_vector_07.cc | 2 +- tests/petsc/subtract_mean_value_03.cc | 4 +- tests/petsc/update_ghosts.cc | 2 +- tests/petsc/vector_assign_01.cc | 2 +- tests/petsc/vector_assign_02.cc | 2 +- tests/petsc/vector_equality_1.cc | 2 +- tests/petsc/vector_equality_2.cc | 2 +- tests/petsc/vector_equality_3.cc | 2 +- tests/petsc/vector_equality_4.cc | 2 +- tests/petsc/vector_print.cc | 2 +- tests/petsc/vector_wrap_01.cc | 2 +- tests/petsc_complex/11.cc | 2 +- tests/petsc_complex/12.cc | 2 +- tests/petsc_complex/13.cc | 2 +- tests/petsc_complex/17.cc | 2 +- tests/petsc_complex/18.cc | 2 +- tests/petsc_complex/19.cc | 2 +- tests/petsc_complex/20.cc | 2 +- tests/petsc_complex/assemble_01.cc | 3 +- tests/petsc_complex/element_access_00.cc | 2 +- tests/petsc_complex/fe_get_function_values.cc | 4 +- .../parallel_sparse_matrix_01.cc | 4 +- tests/petsc_complex/solver_real_01.cc | 2 +- tests/petsc_complex/solver_real_02.cc | 2 +- tests/petsc_complex/solver_real_03.cc | 2 +- tests/petsc_complex/solver_real_03_mf.cc | 2 +- tests/petsc_complex/solver_real_04.cc | 2 +- tests/petsc_complex/vector_02.cc | 2 +- tests/petsc_complex/vector_assign_01.cc | 2 +- tests/petsc_complex/vector_assign_02.cc | 2 +- tests/petsc_complex/vector_equality_1.cc | 2 +- tests/petsc_complex/vector_equality_2.cc | 2 +- tests/petsc_complex/vector_print.cc | 2 +- tests/petsc_complex/vector_wrap_01.cc | 2 +- tests/physics/step-18-rotation_matrix.cc | 4 +- tests/physics/step-18.cc | 4 +- tests/quick_tests/step-petsc.cc | 2 +- tests/quick_tests/step-slepc.cc | 2 +- tests/slepc/solve_01.cc | 2 +- tests/slepc/solve_04.cc | 2 +- tests/slepc/step-36_parallel.cc | 4 +- tests/slepc/step-36_parallel_02.cc | 4 +- tests/slepc/step-36_parallel_03.cc | 4 +- tests/sundials/copy_01.cc | 2 +- 256 files changed, 2314 insertions(+), 2261 deletions(-) create mode 100644 include/deal.II/lac/petsc_block_sparse_matrix.h create mode 100644 include/deal.II/lac/petsc_block_vector.h create mode 100644 include/deal.II/lac/petsc_vector.h diff --git a/include/deal.II/fe/fe_tools_extrapolate.templates.h b/include/deal.II/fe/fe_tools_extrapolate.templates.h index 56daaa1e71..01492c650f 100644 --- 
a/include/deal.II/fe/fe_tools_extrapolate.templates.h +++ b/include/deal.II/fe/fe_tools_extrapolate.templates.h @@ -33,8 +33,8 @@ #include #include #include -#include -#include +#include +#include #include #include diff --git a/include/deal.II/fe/fe_tools_interpolate.templates.h b/include/deal.II/fe/fe_tools_interpolate.templates.h index 296a5cda47..715cc6ce69 100644 --- a/include/deal.II/fe/fe_tools_interpolate.templates.h +++ b/include/deal.II/fe/fe_tools_interpolate.templates.h @@ -43,8 +43,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h index 291a7ee420..40080fdd71 100644 --- a/include/deal.II/lac/affine_constraints.templates.h +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -33,11 +33,10 @@ #include #include #include -#include -#include -#include -#include +#include +#include #include +#include #include #include #include diff --git a/include/deal.II/lac/generic_linear_algebra.h b/include/deal.II/lac/generic_linear_algebra.h index 5187bb79a6..b4f2a8e1c6 100644 --- a/include/deal.II/lac/generic_linear_algebra.h +++ b/include/deal.II/lac/generic_linear_algebra.h @@ -50,10 +50,10 @@ DEAL_II_NAMESPACE_CLOSE #ifdef DEAL_II_WITH_PETSC # include -# include -# include +# include # include # include +# include DEAL_II_NAMESPACE_OPEN diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h index d2567d59d3..fecbee6055 100644 --- a/include/deal.II/lac/la_parallel_block_vector.templates.h +++ b/include/deal.II/lac/la_parallel_block_vector.templates.h @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index 44a45b1889..d4d233d820 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -23,7 +23,7 @@ #include #include -#include +#include #include #include #include diff --git a/include/deal.II/lac/petsc_block_sparse_matrix.h b/include/deal.II/lac/petsc_block_sparse_matrix.h new file mode 100644 index 0000000000..9688daf98c --- /dev/null +++ b/include/deal.II/lac/petsc_block_sparse_matrix.h @@ -0,0 +1,361 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2004 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_petsc_block_sparse_matrix_h +#define dealii_petsc_block_sparse_matrix_h + + +#include + +#ifdef DEAL_II_WITH_PETSC + +# include + +# include +# include +# include +# include +# include + +# include + +DEAL_II_NAMESPACE_OPEN + + + +namespace PETScWrappers +{ + namespace MPI + { + /*! @addtogroup PETScWrappers + *@{ + */ + + /** + * Blocked sparse matrix based on the PETScWrappers::MPI::SparseMatrix + * class. 
This class implements the functions that are specific to the + * PETSc SparseMatrix base objects for a blocked sparse matrix, and leaves + * the actual work relaying most of the calls to the individual blocks to + * the functions implemented in the base class. See there also for a + * description of when this class is useful. + * + * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices + * do not have external objects for the sparsity patterns. Thus, one does + * not determine the size of the individual blocks of a block matrix of + * this type by attaching a block sparsity pattern, but by calling + * reinit() to set the number of blocks and then by setting the size of + * each block separately. In order to fix the data structures of the block + * matrix, it is then necessary to let it know that we have changed the + * sizes of the underlying matrices. For this, one has to call the + * collect_sizes() function, for much the same reason as is documented + * with the BlockSparsityPattern class. + * + * @ingroup Matrix1 @see + * @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2004 + */ + class BlockSparseMatrix : public BlockMatrixBase + { + public: + /** + * Typedef the base class for simpler access to its own alias. + */ + using BaseClass = BlockMatrixBase; + + /** + * Typedef the type of the underlying matrix. + */ + using BlockType = BaseClass::BlockType; + + /** + * Import the alias from the base class. + */ + using value_type = BaseClass::value_type; + using pointer = BaseClass::pointer; + using const_pointer = BaseClass::const_pointer; + using reference = BaseClass::reference; + using const_reference = BaseClass::const_reference; + using size_type = BaseClass::size_type; + using iterator = BaseClass::iterator; + using const_iterator = BaseClass::const_iterator; + + /** + * Constructor; initializes the matrix to be empty, without any + * structure, i.e. the matrix is not usable at all. This constructor is + * therefore only useful for matrices which are members of a class. All + * other matrices should be created at a point in the data flow where + * all necessary information is available. + * + * You have to initialize the matrix before usage with + * reinit(BlockSparsityPattern). The number of blocks per row and column + * are then determined by that function. + */ + BlockSparseMatrix() = default; + + /** + * Destructor. + */ + ~BlockSparseMatrix() override = default; + + /** + * Pseudo copy operator only copying empty objects. The sizes of the + * block matrices need to be the same. + */ + BlockSparseMatrix & + operator=(const BlockSparseMatrix &); + + /** + * This operator assigns a scalar to a matrix. Since this does usually + * not make much sense (should we set all matrix entries to this value? + * Only the nonzero entries of the sparsity pattern?), this operation is + * only allowed if the actual value to be assigned is zero. This + * operator only exists to allow for the obvious notation + * matrix=0, which sets all elements of the matrix to zero, but + * keep the sparsity pattern previously used. + */ + BlockSparseMatrix & + operator=(const double d); + + /** + * Resize the matrix, by setting the number of block rows and columns. + * This deletes all blocks and replaces them with uninitialized ones, + * i.e. ones for which also the sizes are not yet set. You have to do + * that by calling the @p reinit functions of the blocks themselves. Do + * not forget to call collect_sizes() after that on this object. 
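+ *
+ * As an illustrative sketch (not part of this patch), assume a vector of
+ * IndexSet objects @p owned describing the locally owned rows/columns of
+ * each block, a BlockDynamicSparsityPattern @p dsp, and a communicator
+ * @p mpi_communicator; these names are placeholders. A square 2x2 block
+ * matrix could then be set up roughly as follows:
+ * @code
+ * PETScWrappers::MPI::BlockSparseMatrix matrix;
+ * matrix.reinit(2, 2);
+ * for (unsigned int r = 0; r < 2; ++r)
+ *   for (unsigned int c = 0; c < 2; ++c)
+ *     matrix.block(r, c).reinit(owned[r],
+ *                               owned[c],
+ *                               dsp.block(r, c),
+ *                               mpi_communicator);
+ * matrix.collect_sizes();
+ * @endcode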
+ * + * The reason that you have to set sizes of the blocks yourself is that + * the sizes may be varying, the maximum number of elements per row may + * be varying, etc. It is simpler not to reproduce the interface of the + * SparsityPattern class here but rather let the user call whatever + * function she desires. + */ + void + reinit(const size_type n_block_rows, const size_type n_block_columns); + + + /** + * Efficiently reinit the block matrix for a parallel computation. Only + * the BlockSparsityPattern of the Simple type can efficiently store + * large sparsity patterns in parallel, so this is the only supported + * argument. The IndexSets describe the locally owned range of DoFs for + * each block. Note that the IndexSets needs to be ascending and 1:1. + * For a symmetric structure hand in the same vector for the first two + * arguments. + */ + void + reinit(const std::vector & rows, + const std::vector & cols, + const BlockDynamicSparsityPattern &bdsp, + const MPI_Comm & com); + + + /** + * Same as above but for a symmetric structure only. + */ + void + reinit(const std::vector & sizes, + const BlockDynamicSparsityPattern &bdsp, + const MPI_Comm & com); + + + + /** + * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this + * matrix. + */ + void + vmult(BlockVector &dst, const BlockVector &src) const; + + /** + * Matrix-vector multiplication. Just like the previous function, but + * only applicable if the matrix has only one block column. + */ + void + vmult(BlockVector &dst, const Vector &src) const; + + /** + * Matrix-vector multiplication. Just like the previous function, but + * only applicable if the matrix has only one block row. + */ + void + vmult(Vector &dst, const BlockVector &src) const; + + /** + * Matrix-vector multiplication. Just like the previous function, but + * only applicable if the matrix has only one block. + */ + void + vmult(Vector &dst, const Vector &src) const; + + /** + * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this + * matrix. This function does the same as vmult() but takes the + * transposed matrix. + */ + void + Tvmult(BlockVector &dst, const BlockVector &src) const; + + /** + * Matrix-vector multiplication. Just like the previous function, but + * only applicable if the matrix has only one block row. + */ + void + Tvmult(BlockVector &dst, const Vector &src) const; + + /** + * Matrix-vector multiplication. Just like the previous function, but + * only applicable if the matrix has only one block column. + */ + void + Tvmult(Vector &dst, const BlockVector &src) const; + + /** + * Matrix-vector multiplication. Just like the previous function, but + * only applicable if the matrix has only one block. + */ + void + Tvmult(Vector &dst, const Vector &src) const; + + /** + * This function collects the sizes of the sub-objects and stores them + * in internal arrays, in order to be able to relay global indices into + * the matrix to indices into the subobjects. You *must* call this + * function each time after you have changed the size of the sub- + * objects. + */ + void + collect_sizes(); + + /** + * Return the partitioning of the domain space of this matrix, i.e., the + * partitioning of the vectors this matrix has to be multiplied with. + */ + std::vector + locally_owned_domain_indices() const; + + /** + * Return the partitioning of the range space of this matrix, i.e., the + * partitioning of the vectors that are result from matrix-vector + * products. 
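+ *
+ * Together with locally_owned_domain_indices(), this partitioning can be
+ * used to create vectors that are compatible with the matrix. A
+ * hypothetical sketch (assuming a block matrix @p matrix and a
+ * communicator @p mpi_communicator) might look like:
+ * @code
+ * PETScWrappers::MPI::BlockVector x(matrix.locally_owned_domain_indices(),
+ *                                   mpi_communicator);
+ * PETScWrappers::MPI::BlockVector b(matrix.locally_owned_range_indices(),
+ *                                   mpi_communicator);
+ * matrix.vmult(b, x);
+ * @endcode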
+ */ + std::vector + locally_owned_range_indices() const; + + /** + * Return a reference to the MPI communicator object in use with this + * matrix. + */ + const MPI_Comm & + get_mpi_communicator() const; + + /** + * Make the clear() function in the base class visible, though it is + * protected. + */ + using BlockMatrixBase::clear; + }; + + + + /*@}*/ + + // ------------- inline and template functions ----------------- + + inline BlockSparseMatrix & + BlockSparseMatrix::operator=(const double d) + { + Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue()); + + for (size_type r = 0; r < this->n_block_rows(); ++r) + for (size_type c = 0; c < this->n_block_cols(); ++c) + this->block(r, c) = d; + + return *this; + } + + + + inline void + BlockSparseMatrix::vmult(BlockVector &dst, const BlockVector &src) const + { + BaseClass::vmult_block_block(dst, src); + } + + + + inline void + BlockSparseMatrix::vmult(BlockVector &dst, const Vector &src) const + { + BaseClass::vmult_block_nonblock(dst, src); + } + + + + inline void + BlockSparseMatrix::vmult(Vector &dst, const BlockVector &src) const + { + BaseClass::vmult_nonblock_block(dst, src); + } + + + + inline void + BlockSparseMatrix::vmult(Vector &dst, const Vector &src) const + { + BaseClass::vmult_nonblock_nonblock(dst, src); + } + + + inline void + BlockSparseMatrix::Tvmult(BlockVector &dst, const BlockVector &src) const + { + BaseClass::Tvmult_block_block(dst, src); + } + + + + inline void + BlockSparseMatrix::Tvmult(BlockVector &dst, const Vector &src) const + { + BaseClass::Tvmult_block_nonblock(dst, src); + } + + + + inline void + BlockSparseMatrix::Tvmult(Vector &dst, const BlockVector &src) const + { + BaseClass::Tvmult_nonblock_block(dst, src); + } + + + + inline void + BlockSparseMatrix::Tvmult(Vector &dst, const Vector &src) const + { + BaseClass::Tvmult_nonblock_nonblock(dst, src); + } + + } // namespace MPI + +} // namespace PETScWrappers + + +DEAL_II_NAMESPACE_CLOSE + + +#endif // DEAL_II_WITH_PETSC + +#endif // dealii_petsc_block_sparse_matrix_h diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h new file mode 100644 index 0000000000..9420e4d260 --- /dev/null +++ b/include/deal.II/lac/petsc_block_vector.h @@ -0,0 +1,564 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2004 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_petsc_block_vector_h +#define dealii_petsc_block_vector_h + + +#include + +#ifdef DEAL_II_WITH_PETSC + +# include +# include +# include +# include +# include + +DEAL_II_NAMESPACE_OPEN + + +namespace PETScWrappers +{ + // forward declaration + class BlockVector; + + namespace MPI + { + /*! @addtogroup PETScWrappers + *@{ + */ + + /** + * An implementation of block vectors based on the parallel vector class + * implemented in PETScWrappers. 
While the base class provides for most of + * the interface, this class handles the actual allocation of vectors and + * provides functions that are specific to the underlying vector type. + * + * The model of distribution of data is such that each of the blocks is + * distributed across all MPI processes named in the MPI communicator. + * I.e. we don't just distribute the whole vector, but each component. In + * the constructors and reinit() functions, one therefore not only has to + * specify the sizes of the individual blocks, but also the number of + * elements of each of these blocks to be stored on the local process. + * + * @ingroup Vectors @see + * @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2004 + */ + class BlockVector : public BlockVectorBase + { + public: + /** + * Typedef the base class for simpler access to its own alias. + */ + using BaseClass = BlockVectorBase; + + /** + * Typedef the type of the underlying vector. + */ + using BlockType = BaseClass::BlockType; + + /** + * Import the alias from the base class. + */ + using value_type = BaseClass::value_type; + using pointer = BaseClass::pointer; + using const_pointer = BaseClass::const_pointer; + using reference = BaseClass::reference; + using const_reference = BaseClass::const_reference; + using size_type = BaseClass::size_type; + using iterator = BaseClass::iterator; + using const_iterator = BaseClass::const_iterator; + + /** + * Default constructor. Generate an empty vector without any blocks. + */ + BlockVector() = default; + + /** + * Constructor. Generate a block vector with @p n_blocks blocks, each of + * which is a parallel vector across @p communicator with @p block_size + * elements of which @p local_size elements are stored on the present + * process. + */ + explicit BlockVector(const unsigned int n_blocks, + const MPI_Comm & communicator, + const size_type block_size, + const size_type local_size); + + /** + * Copy constructor. Set all the properties of the parallel vector to + * those of the given argument and copy the elements. + */ + BlockVector(const BlockVector &V); + + /** + * Constructor. Set the number of blocks to block_sizes.size() + * and initialize each block with block_sizes[i] zero elements. + * The individual blocks are distributed across the given communicator, + * and each store local_elements[i] elements on the present + * process. + */ + BlockVector(const std::vector &block_sizes, + const MPI_Comm & communicator, + const std::vector &local_elements); + + /** + * Create a BlockVector with parallel_partitioning.size() blocks, each + * initialized with the given IndexSet. + */ + explicit BlockVector(const std::vector ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Same as above, but include ghost elements + */ + BlockVector(const std::vector ¶llel_partitioning, + const std::vector &ghost_indices, + const MPI_Comm & communicator); + + + + /** + * Destructor. Clears memory + */ + ~BlockVector() override = default; + + /** + * Copy operator: fill all components of the vector that are locally + * stored with the given scalar value. + */ + BlockVector & + operator=(const value_type s); + + /** + * Copy operator for arguments of the same type. + */ + BlockVector & + operator=(const BlockVector &V); + + /** + * Reinitialize the BlockVector to contain @p n_blocks of size @p + * block_size, each of which stores @p local_size elements locally. 
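+ *
+ * A minimal sketch with made-up sizes, assuming the program runs on two
+ * MPI processes referenced by a communicator @p mpi_communicator: two
+ * blocks of global size 100, each storing 50 elements locally, could be
+ * created as
+ * @code
+ * PETScWrappers::MPI::BlockVector v;
+ * v.reinit(2, mpi_communicator, 100, 50);
+ * @endcode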
The + * @p communicator argument denotes which MPI channel each of these + * blocks shall communicate. + * + * If omit_zeroing_entries==false, the vector is filled with + * zeros. + */ + void + reinit(const unsigned int n_blocks, + const MPI_Comm & communicator, + const size_type block_size, + const size_type local_size, + const bool omit_zeroing_entries = false); + + /** + * Reinitialize the BlockVector such that it contains + * block_sizes.size() blocks. Each block is reinitialized to + * dimension block_sizes[i]. Each of them stores + * local_sizes[i] elements on the present process. + * + * If the number of blocks is the same as before this function was + * called, all vectors remain the same and reinit() is called for each + * vector. + * + * If omit_zeroing_entries==false, the vector is filled with + * zeros. + * + * Note that you must call this (or the other reinit() functions) + * function, rather than calling the reinit() functions of an individual + * block, to allow the block vector to update its caches of vector + * sizes. If you call reinit() of one of the blocks, then subsequent + * actions on this object may yield unpredictable results since they may + * be routed to the wrong block. + */ + void + reinit(const std::vector &block_sizes, + const MPI_Comm & communicator, + const std::vector &local_sizes, + const bool omit_zeroing_entries = false); + + /** + * Change the dimension to that of the vector V. The same + * applies as for the other reinit() function. + * + * The elements of V are not copied, i.e. this function is the + * same as calling reinit (V.size(), omit_zeroing_entries). + * + * Note that you must call this (or the other reinit() functions) + * function, rather than calling the reinit() functions of an individual + * block, to allow the block vector to update its caches of vector + * sizes. If you call reinit() on one of the blocks, then subsequent + * actions on this object may yield unpredictable results since they may + * be routed to the wrong block. + */ + void + reinit(const BlockVector &V, const bool omit_zeroing_entries = false); + + /** + * Reinitialize the BlockVector using IndexSets. See the constructor + * with the same arguments for details. + */ + void + reinit(const std::vector ¶llel_partitioning, + const MPI_Comm & communicator); + + /** + * Same as above but include ghost entries. + */ + void + reinit(const std::vector ¶llel_partitioning, + const std::vector &ghost_entries, + const MPI_Comm & communicator); + + /** + * Change the number of blocks to num_blocks. The individual + * blocks will get initialized with zero size, so it is assumed that the + * user resizes the individual blocks by herself in an appropriate way, + * and calls collect_sizes afterwards. + */ + void + reinit(const unsigned int num_blocks); + + /** + * Return if this vector is a ghosted vector (and thus read-only). + */ + bool + has_ghost_elements() const; + + /** + * Return a reference to the MPI communicator object in use with this + * vector. + */ + const MPI_Comm & + get_mpi_communicator() const; + + /** + * Swap the contents of this vector and the other vector v. One + * could do this operation with a temporary variable and copying over + * the data elements, but this function is significantly more efficient + * since it only swaps the pointers to the data of the two vectors and + * therefore does not need to allocate temporary storage and move data + * around. + * + * Limitation: right now this function only works if both vectors have + * the same number of blocks. 
If needed, the numbers of blocks should be + * exchanged, too. + * + * This function is analogous to the swap() function of all C++ + * standard containers. Also, there is a global function swap(u,v) that + * simply calls u.swap(v), again in analogy to standard + * functions. + */ + void + swap(BlockVector &v); + + /** + * Print to a stream. + */ + void + print(std::ostream & out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Exception + */ + DeclException0(ExcIteratorRangeDoesNotMatchVectorSize); + /** + * Exception + */ + DeclException0(ExcNonMatchingBlockVectors); + }; + + /*@}*/ + + /*--------------------- Inline functions --------------------------------*/ + + inline BlockVector::BlockVector(const unsigned int n_blocks, + const MPI_Comm & communicator, + const size_type block_size, + const size_type local_size) + { + reinit(n_blocks, communicator, block_size, local_size); + } + + + + inline BlockVector::BlockVector( + const std::vector &block_sizes, + const MPI_Comm & communicator, + const std::vector &local_elements) + { + reinit(block_sizes, communicator, local_elements, false); + } + + + inline BlockVector::BlockVector(const BlockVector &v) + : BlockVectorBase() + { + this->components.resize(v.n_blocks()); + this->block_indices = v.block_indices; + + for (unsigned int i = 0; i < this->n_blocks(); ++i) + this->components[i] = v.components[i]; + } + + inline BlockVector::BlockVector( + const std::vector ¶llel_partitioning, + const MPI_Comm & communicator) + { + reinit(parallel_partitioning, communicator); + } + + inline BlockVector::BlockVector( + const std::vector ¶llel_partitioning, + const std::vector &ghost_indices, + const MPI_Comm & communicator) + { + reinit(parallel_partitioning, ghost_indices, communicator); + } + + inline BlockVector & + BlockVector::operator=(const value_type s) + { + BaseClass::operator=(s); + return *this; + } + + inline BlockVector & + BlockVector::operator=(const BlockVector &v) + { + // we only allow assignment to vectors with the same number of blocks + // or to an empty BlockVector + Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(), + ExcDimensionMismatch(n_blocks(), v.n_blocks())); + + if (this->n_blocks() != v.n_blocks()) + reinit(v.n_blocks()); + + for (size_type i = 0; i < this->n_blocks(); ++i) + this->components[i] = v.block(i); + + collect_sizes(); + + return *this; + } + + + + inline void + BlockVector::reinit(const unsigned int n_blocks, + const MPI_Comm & communicator, + const size_type block_size, + const size_type local_size, + const bool omit_zeroing_entries) + { + reinit(std::vector(n_blocks, block_size), + communicator, + std::vector(n_blocks, local_size), + omit_zeroing_entries); + } + + + + inline void + BlockVector::reinit(const std::vector &block_sizes, + const MPI_Comm & communicator, + const std::vector &local_sizes, + const bool omit_zeroing_entries) + { + this->block_indices.reinit(block_sizes); + if (this->components.size() != this->n_blocks()) + this->components.resize(this->n_blocks()); + + for (unsigned int i = 0; i < this->n_blocks(); ++i) + this->components[i].reinit(communicator, + block_sizes[i], + local_sizes[i], + omit_zeroing_entries); + } + + + inline void + BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries) + { + this->block_indices = v.get_block_indices(); + if (this->components.size() != this->n_blocks()) + this->components.resize(this->n_blocks()); + + for (unsigned int i = 0; i < this->n_blocks(); ++i) + 
block(i).reinit(v.block(i), omit_zeroing_entries); + } + + inline void + BlockVector::reinit(const std::vector ¶llel_partitioning, + const MPI_Comm & communicator) + { + std::vector sizes(parallel_partitioning.size()); + for (unsigned int i = 0; i < parallel_partitioning.size(); ++i) + sizes[i] = parallel_partitioning[i].size(); + + this->block_indices.reinit(sizes); + if (this->components.size() != this->n_blocks()) + this->components.resize(this->n_blocks()); + + for (unsigned int i = 0; i < this->n_blocks(); ++i) + block(i).reinit(parallel_partitioning[i], communicator); + } + + inline void + BlockVector::reinit(const std::vector ¶llel_partitioning, + const std::vector &ghost_entries, + const MPI_Comm & communicator) + { + std::vector sizes(parallel_partitioning.size()); + for (unsigned int i = 0; i < parallel_partitioning.size(); ++i) + sizes[i] = parallel_partitioning[i].size(); + + this->block_indices.reinit(sizes); + if (this->components.size() != this->n_blocks()) + this->components.resize(this->n_blocks()); + + for (unsigned int i = 0; i < this->n_blocks(); ++i) + block(i).reinit(parallel_partitioning[i], + ghost_entries[i], + communicator); + } + + + + inline const MPI_Comm & + BlockVector::get_mpi_communicator() const + { + return block(0).get_mpi_communicator(); + } + + inline bool + BlockVector::has_ghost_elements() const + { + bool ghosted = block(0).has_ghost_elements(); +# ifdef DEBUG + for (unsigned int i = 0; i < this->n_blocks(); ++i) + Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError()); +# endif + return ghosted; + } + + + inline void + BlockVector::swap(BlockVector &v) + { + std::swap(this->components, v.components); + + ::dealii::swap(this->block_indices, v.block_indices); + } + + + + inline void + BlockVector::print(std::ostream & out, + const unsigned int precision, + const bool scientific, + const bool across) const + { + for (unsigned int i = 0; i < this->n_blocks(); ++i) + { + if (across) + out << 'C' << i << ':'; + else + out << "Component " << i << std::endl; + this->components[i].print(out, precision, scientific, across); + } + } + + + + /** + * Global function which overloads the default implementation of the C++ + * standard library which uses a temporary object. The function simply + * exchanges the data of the two vectors. + * + * @relatesalso PETScWrappers::MPI::BlockVector + * @author Wolfgang Bangerth, 2000 + */ + inline void + swap(BlockVector &u, BlockVector &v) + { + u.swap(v); + } + + } // namespace MPI + +} // namespace PETScWrappers + +namespace internal +{ + namespace LinearOperatorImplementation + { + template + class ReinitHelper; + + /** + * A helper class used internally in linear_operator.h. Specialization for + * PETScWrappers::MPI::BlockVector. + */ + template <> + class ReinitHelper + { + public: + template + static void + reinit_range_vector(const Matrix & matrix, + PETScWrappers::MPI::BlockVector &v, + bool /*omit_zeroing_entries*/) + { + v.reinit(matrix.locally_owned_range_indices(), + matrix.get_mpi_communicator()); + } + + template + static void + reinit_domain_vector(const Matrix & matrix, + PETScWrappers::MPI::BlockVector &v, + bool /*omit_zeroing_entries*/) + { + v.reinit(matrix.locally_owned_domain_indices(), + matrix.get_mpi_communicator()); + } + }; + + } // namespace LinearOperatorImplementation +} /* namespace internal */ + + +/** + * Declare dealii::PETScWrappers::MPI::BlockVector as distributed vector. 
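+ *
+ * In other words, generic code can query this trait to learn that the
+ * vector is distributed; as an illustrative check only:
+ * @code
+ * static_assert(is_serial_vector<PETScWrappers::MPI::BlockVector>::value ==
+ *                 false,
+ *               "PETSc MPI block vectors are distributed");
+ * @endcode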
+ * + * @author Uwe Koecher, 2017 + */ +template <> +struct is_serial_vector : std::false_type +{}; + + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_WITH_PETSC + +#endif diff --git a/include/deal.II/lac/petsc_matrix_free.h b/include/deal.II/lac/petsc_matrix_free.h index ff3aa5c44c..2b41841a02 100644 --- a/include/deal.II/lac/petsc_matrix_free.h +++ b/include/deal.II/lac/petsc_matrix_free.h @@ -22,7 +22,7 @@ # ifdef DEAL_II_WITH_PETSC # include # include -# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h b/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h index 4502c924c1..617a95a859 100644 --- a/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h +++ b/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h @@ -19,343 +19,9 @@ #include -#ifdef DEAL_II_WITH_PETSC +#include -# include - -# include -# include -# include -# include -# include - -# include - -DEAL_II_NAMESPACE_OPEN - - - -namespace PETScWrappers -{ - namespace MPI - { - /*! @addtogroup PETScWrappers - *@{ - */ - - /** - * Blocked sparse matrix based on the PETScWrappers::MPI::SparseMatrix - * class. This class implements the functions that are specific to the - * PETSc SparseMatrix base objects for a blocked sparse matrix, and leaves - * the actual work relaying most of the calls to the individual blocks to - * the functions implemented in the base class. See there also for a - * description of when this class is useful. - * - * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices - * do not have external objects for the sparsity patterns. Thus, one does - * not determine the size of the individual blocks of a block matrix of - * this type by attaching a block sparsity pattern, but by calling - * reinit() to set the number of blocks and then by setting the size of - * each block separately. In order to fix the data structures of the block - * matrix, it is then necessary to let it know that we have changed the - * sizes of the underlying matrices. For this, one has to call the - * collect_sizes() function, for much the same reason as is documented - * with the BlockSparsityPattern class. - * - * @ingroup Matrix1 @see - * @ref GlossBlockLA "Block (linear algebra)" - * @author Wolfgang Bangerth, 2004 - */ - class BlockSparseMatrix : public BlockMatrixBase - { - public: - /** - * Typedef the base class for simpler access to its own alias. - */ - using BaseClass = BlockMatrixBase; - - /** - * Typedef the type of the underlying matrix. - */ - using BlockType = BaseClass::BlockType; - - /** - * Import the alias from the base class. - */ - using value_type = BaseClass::value_type; - using pointer = BaseClass::pointer; - using const_pointer = BaseClass::const_pointer; - using reference = BaseClass::reference; - using const_reference = BaseClass::const_reference; - using size_type = BaseClass::size_type; - using iterator = BaseClass::iterator; - using const_iterator = BaseClass::const_iterator; - - /** - * Constructor; initializes the matrix to be empty, without any - * structure, i.e. the matrix is not usable at all. This constructor is - * therefore only useful for matrices which are members of a class. All - * other matrices should be created at a point in the data flow where - * all necessary information is available. - * - * You have to initialize the matrix before usage with - * reinit(BlockSparsityPattern). The number of blocks per row and column - * are then determined by that function. 
- */ - BlockSparseMatrix() = default; - - /** - * Destructor. - */ - ~BlockSparseMatrix() override = default; - - /** - * Pseudo copy operator only copying empty objects. The sizes of the - * block matrices need to be the same. - */ - BlockSparseMatrix & - operator=(const BlockSparseMatrix &); - - /** - * This operator assigns a scalar to a matrix. Since this does usually - * not make much sense (should we set all matrix entries to this value? - * Only the nonzero entries of the sparsity pattern?), this operation is - * only allowed if the actual value to be assigned is zero. This - * operator only exists to allow for the obvious notation - * matrix=0, which sets all elements of the matrix to zero, but - * keep the sparsity pattern previously used. - */ - BlockSparseMatrix & - operator=(const double d); - - /** - * Resize the matrix, by setting the number of block rows and columns. - * This deletes all blocks and replaces them with uninitialized ones, - * i.e. ones for which also the sizes are not yet set. You have to do - * that by calling the @p reinit functions of the blocks themselves. Do - * not forget to call collect_sizes() after that on this object. - * - * The reason that you have to set sizes of the blocks yourself is that - * the sizes may be varying, the maximum number of elements per row may - * be varying, etc. It is simpler not to reproduce the interface of the - * SparsityPattern class here but rather let the user call whatever - * function she desires. - */ - void - reinit(const size_type n_block_rows, const size_type n_block_columns); - - - /** - * Efficiently reinit the block matrix for a parallel computation. Only - * the BlockSparsityPattern of the Simple type can efficiently store - * large sparsity patterns in parallel, so this is the only supported - * argument. The IndexSets describe the locally owned range of DoFs for - * each block. Note that the IndexSets needs to be ascending and 1:1. - * For a symmetric structure hand in the same vector for the first two - * arguments. - */ - void - reinit(const std::vector & rows, - const std::vector & cols, - const BlockDynamicSparsityPattern &bdsp, - const MPI_Comm & com); - - - /** - * Same as above but for a symmetric structure only. - */ - void - reinit(const std::vector & sizes, - const BlockDynamicSparsityPattern &bdsp, - const MPI_Comm & com); - - - - /** - * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this - * matrix. - */ - void - vmult(BlockVector &dst, const BlockVector &src) const; - - /** - * Matrix-vector multiplication. Just like the previous function, but - * only applicable if the matrix has only one block column. - */ - void - vmult(BlockVector &dst, const Vector &src) const; - - /** - * Matrix-vector multiplication. Just like the previous function, but - * only applicable if the matrix has only one block row. - */ - void - vmult(Vector &dst, const BlockVector &src) const; - - /** - * Matrix-vector multiplication. Just like the previous function, but - * only applicable if the matrix has only one block. - */ - void - vmult(Vector &dst, const Vector &src) const; - - /** - * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this - * matrix. This function does the same as vmult() but takes the - * transposed matrix. - */ - void - Tvmult(BlockVector &dst, const BlockVector &src) const; - - /** - * Matrix-vector multiplication. Just like the previous function, but - * only applicable if the matrix has only one block row. 
- */ - void - Tvmult(BlockVector &dst, const Vector &src) const; - - /** - * Matrix-vector multiplication. Just like the previous function, but - * only applicable if the matrix has only one block column. - */ - void - Tvmult(Vector &dst, const BlockVector &src) const; - - /** - * Matrix-vector multiplication. Just like the previous function, but - * only applicable if the matrix has only one block. - */ - void - Tvmult(Vector &dst, const Vector &src) const; - - /** - * This function collects the sizes of the sub-objects and stores them - * in internal arrays, in order to be able to relay global indices into - * the matrix to indices into the subobjects. You *must* call this - * function each time after you have changed the size of the sub- - * objects. - */ - void - collect_sizes(); - - /** - * Return the partitioning of the domain space of this matrix, i.e., the - * partitioning of the vectors this matrix has to be multiplied with. - */ - std::vector - locally_owned_domain_indices() const; - - /** - * Return the partitioning of the range space of this matrix, i.e., the - * partitioning of the vectors that are result from matrix-vector - * products. - */ - std::vector - locally_owned_range_indices() const; - - /** - * Return a reference to the MPI communicator object in use with this - * matrix. - */ - const MPI_Comm & - get_mpi_communicator() const; - - /** - * Make the clear() function in the base class visible, though it is - * protected. - */ - using BlockMatrixBase::clear; - }; - - - - /*@}*/ - - // ------------- inline and template functions ----------------- - - inline BlockSparseMatrix & - BlockSparseMatrix::operator=(const double d) - { - Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue()); - - for (size_type r = 0; r < this->n_block_rows(); ++r) - for (size_type c = 0; c < this->n_block_cols(); ++c) - this->block(r, c) = d; - - return *this; - } - - - - inline void - BlockSparseMatrix::vmult(BlockVector &dst, const BlockVector &src) const - { - BaseClass::vmult_block_block(dst, src); - } - - - - inline void - BlockSparseMatrix::vmult(BlockVector &dst, const Vector &src) const - { - BaseClass::vmult_block_nonblock(dst, src); - } - - - - inline void - BlockSparseMatrix::vmult(Vector &dst, const BlockVector &src) const - { - BaseClass::vmult_nonblock_block(dst, src); - } - - - - inline void - BlockSparseMatrix::vmult(Vector &dst, const Vector &src) const - { - BaseClass::vmult_nonblock_nonblock(dst, src); - } - - - inline void - BlockSparseMatrix::Tvmult(BlockVector &dst, const BlockVector &src) const - { - BaseClass::Tvmult_block_block(dst, src); - } - - - - inline void - BlockSparseMatrix::Tvmult(BlockVector &dst, const Vector &src) const - { - BaseClass::Tvmult_block_nonblock(dst, src); - } - - - - inline void - BlockSparseMatrix::Tvmult(Vector &dst, const BlockVector &src) const - { - BaseClass::Tvmult_nonblock_block(dst, src); - } - - - - inline void - BlockSparseMatrix::Tvmult(Vector &dst, const Vector &src) const - { - BaseClass::Tvmult_nonblock_nonblock(dst, src); - } - - } // namespace MPI - -} // namespace PETScWrappers - - -DEAL_II_NAMESPACE_CLOSE - - -#endif // DEAL_II_WITH_PETSC +#pragma DEAL_II_WARNING( \ + "This file is deprecated. 
Use deal.II/lac/petsc_block_sparse_matrix.h instead!") #endif // dealii_petsc_parallel_block_sparse_matrix_h diff --git a/include/deal.II/lac/petsc_parallel_block_vector.h b/include/deal.II/lac/petsc_parallel_block_vector.h index 56206b91e8..5a3ff99e34 100644 --- a/include/deal.II/lac/petsc_parallel_block_vector.h +++ b/include/deal.II/lac/petsc_parallel_block_vector.h @@ -19,546 +19,9 @@ #include -#ifdef DEAL_II_WITH_PETSC +#include -# include -# include -# include -# include -# include - -DEAL_II_NAMESPACE_OPEN - - -namespace PETScWrappers -{ - // forward declaration - class BlockVector; - - namespace MPI - { - /*! @addtogroup PETScWrappers - *@{ - */ - - /** - * An implementation of block vectors based on the parallel vector class - * implemented in PETScWrappers. While the base class provides for most of - * the interface, this class handles the actual allocation of vectors and - * provides functions that are specific to the underlying vector type. - * - * The model of distribution of data is such that each of the blocks is - * distributed across all MPI processes named in the MPI communicator. - * I.e. we don't just distribute the whole vector, but each component. In - * the constructors and reinit() functions, one therefore not only has to - * specify the sizes of the individual blocks, but also the number of - * elements of each of these blocks to be stored on the local process. - * - * @ingroup Vectors @see - * @ref GlossBlockLA "Block (linear algebra)" - * @author Wolfgang Bangerth, 2004 - */ - class BlockVector : public BlockVectorBase - { - public: - /** - * Typedef the base class for simpler access to its own alias. - */ - using BaseClass = BlockVectorBase; - - /** - * Typedef the type of the underlying vector. - */ - using BlockType = BaseClass::BlockType; - - /** - * Import the alias from the base class. - */ - using value_type = BaseClass::value_type; - using pointer = BaseClass::pointer; - using const_pointer = BaseClass::const_pointer; - using reference = BaseClass::reference; - using const_reference = BaseClass::const_reference; - using size_type = BaseClass::size_type; - using iterator = BaseClass::iterator; - using const_iterator = BaseClass::const_iterator; - - /** - * Default constructor. Generate an empty vector without any blocks. - */ - BlockVector() = default; - - /** - * Constructor. Generate a block vector with @p n_blocks blocks, each of - * which is a parallel vector across @p communicator with @p block_size - * elements of which @p local_size elements are stored on the present - * process. - */ - explicit BlockVector(const unsigned int n_blocks, - const MPI_Comm & communicator, - const size_type block_size, - const size_type local_size); - - /** - * Copy constructor. Set all the properties of the parallel vector to - * those of the given argument and copy the elements. - */ - BlockVector(const BlockVector &V); - - /** - * Constructor. Set the number of blocks to block_sizes.size() - * and initialize each block with block_sizes[i] zero elements. - * The individual blocks are distributed across the given communicator, - * and each store local_elements[i] elements on the present - * process. - */ - BlockVector(const std::vector &block_sizes, - const MPI_Comm & communicator, - const std::vector &local_elements); - - /** - * Create a BlockVector with parallel_partitioning.size() blocks, each - * initialized with the given IndexSet. 
- */ - explicit BlockVector(const std::vector ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD); - - /** - * Same as above, but include ghost elements - */ - BlockVector(const std::vector ¶llel_partitioning, - const std::vector &ghost_indices, - const MPI_Comm & communicator); - - - - /** - * Destructor. Clears memory - */ - ~BlockVector() override = default; - - /** - * Copy operator: fill all components of the vector that are locally - * stored with the given scalar value. - */ - BlockVector & - operator=(const value_type s); - - /** - * Copy operator for arguments of the same type. - */ - BlockVector & - operator=(const BlockVector &V); - - /** - * Reinitialize the BlockVector to contain @p n_blocks of size @p - * block_size, each of which stores @p local_size elements locally. The - * @p communicator argument denotes which MPI channel each of these - * blocks shall communicate. - * - * If omit_zeroing_entries==false, the vector is filled with - * zeros. - */ - void - reinit(const unsigned int n_blocks, - const MPI_Comm & communicator, - const size_type block_size, - const size_type local_size, - const bool omit_zeroing_entries = false); - - /** - * Reinitialize the BlockVector such that it contains - * block_sizes.size() blocks. Each block is reinitialized to - * dimension block_sizes[i]. Each of them stores - * local_sizes[i] elements on the present process. - * - * If the number of blocks is the same as before this function was - * called, all vectors remain the same and reinit() is called for each - * vector. - * - * If omit_zeroing_entries==false, the vector is filled with - * zeros. - * - * Note that you must call this (or the other reinit() functions) - * function, rather than calling the reinit() functions of an individual - * block, to allow the block vector to update its caches of vector - * sizes. If you call reinit() of one of the blocks, then subsequent - * actions on this object may yield unpredictable results since they may - * be routed to the wrong block. - */ - void - reinit(const std::vector &block_sizes, - const MPI_Comm & communicator, - const std::vector &local_sizes, - const bool omit_zeroing_entries = false); - - /** - * Change the dimension to that of the vector V. The same - * applies as for the other reinit() function. - * - * The elements of V are not copied, i.e. this function is the - * same as calling reinit (V.size(), omit_zeroing_entries). - * - * Note that you must call this (or the other reinit() functions) - * function, rather than calling the reinit() functions of an individual - * block, to allow the block vector to update its caches of vector - * sizes. If you call reinit() on one of the blocks, then subsequent - * actions on this object may yield unpredictable results since they may - * be routed to the wrong block. - */ - void - reinit(const BlockVector &V, const bool omit_zeroing_entries = false); - - /** - * Reinitialize the BlockVector using IndexSets. See the constructor - * with the same arguments for details. - */ - void - reinit(const std::vector ¶llel_partitioning, - const MPI_Comm & communicator); - - /** - * Same as above but include ghost entries. - */ - void - reinit(const std::vector ¶llel_partitioning, - const std::vector &ghost_entries, - const MPI_Comm & communicator); - - /** - * Change the number of blocks to num_blocks. 
The individual - * blocks will get initialized with zero size, so it is assumed that the - * user resizes the individual blocks by herself in an appropriate way, - * and calls collect_sizes afterwards. - */ - void - reinit(const unsigned int num_blocks); - - /** - * Return if this vector is a ghosted vector (and thus read-only). - */ - bool - has_ghost_elements() const; - - /** - * Return a reference to the MPI communicator object in use with this - * vector. - */ - const MPI_Comm & - get_mpi_communicator() const; - - /** - * Swap the contents of this vector and the other vector v. One - * could do this operation with a temporary variable and copying over - * the data elements, but this function is significantly more efficient - * since it only swaps the pointers to the data of the two vectors and - * therefore does not need to allocate temporary storage and move data - * around. - * - * Limitation: right now this function only works if both vectors have - * the same number of blocks. If needed, the numbers of blocks should be - * exchanged, too. - * - * This function is analogous to the swap() function of all C++ - * standard containers. Also, there is a global function swap(u,v) that - * simply calls u.swap(v), again in analogy to standard - * functions. - */ - void - swap(BlockVector &v); - - /** - * Print to a stream. - */ - void - print(std::ostream & out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Exception - */ - DeclException0(ExcIteratorRangeDoesNotMatchVectorSize); - /** - * Exception - */ - DeclException0(ExcNonMatchingBlockVectors); - }; - - /*@}*/ - - /*--------------------- Inline functions --------------------------------*/ - - inline BlockVector::BlockVector(const unsigned int n_blocks, - const MPI_Comm & communicator, - const size_type block_size, - const size_type local_size) - { - reinit(n_blocks, communicator, block_size, local_size); - } - - - - inline BlockVector::BlockVector( - const std::vector &block_sizes, - const MPI_Comm & communicator, - const std::vector &local_elements) - { - reinit(block_sizes, communicator, local_elements, false); - } - - - inline BlockVector::BlockVector(const BlockVector &v) - : BlockVectorBase() - { - this->components.resize(v.n_blocks()); - this->block_indices = v.block_indices; - - for (unsigned int i = 0; i < this->n_blocks(); ++i) - this->components[i] = v.components[i]; - } - - inline BlockVector::BlockVector( - const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) - { - reinit(parallel_partitioning, communicator); - } - - inline BlockVector::BlockVector( - const std::vector ¶llel_partitioning, - const std::vector &ghost_indices, - const MPI_Comm & communicator) - { - reinit(parallel_partitioning, ghost_indices, communicator); - } - - inline BlockVector & - BlockVector::operator=(const value_type s) - { - BaseClass::operator=(s); - return *this; - } - - inline BlockVector & - BlockVector::operator=(const BlockVector &v) - { - // we only allow assignment to vectors with the same number of blocks - // or to an empty BlockVector - Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(), - ExcDimensionMismatch(n_blocks(), v.n_blocks())); - - if (this->n_blocks() != v.n_blocks()) - reinit(v.n_blocks()); - - for (size_type i = 0; i < this->n_blocks(); ++i) - this->components[i] = v.block(i); - - collect_sizes(); - - return *this; - } - - - - inline void - BlockVector::reinit(const unsigned int n_blocks, - const MPI_Comm & communicator, - const size_type 
block_size, - const size_type local_size, - const bool omit_zeroing_entries) - { - reinit(std::vector(n_blocks, block_size), - communicator, - std::vector(n_blocks, local_size), - omit_zeroing_entries); - } - - - - inline void - BlockVector::reinit(const std::vector &block_sizes, - const MPI_Comm & communicator, - const std::vector &local_sizes, - const bool omit_zeroing_entries) - { - this->block_indices.reinit(block_sizes); - if (this->components.size() != this->n_blocks()) - this->components.resize(this->n_blocks()); - - for (unsigned int i = 0; i < this->n_blocks(); ++i) - this->components[i].reinit(communicator, - block_sizes[i], - local_sizes[i], - omit_zeroing_entries); - } - - - inline void - BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries) - { - this->block_indices = v.get_block_indices(); - if (this->components.size() != this->n_blocks()) - this->components.resize(this->n_blocks()); - - for (unsigned int i = 0; i < this->n_blocks(); ++i) - block(i).reinit(v.block(i), omit_zeroing_entries); - } - - inline void - BlockVector::reinit(const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) - { - std::vector sizes(parallel_partitioning.size()); - for (unsigned int i = 0; i < parallel_partitioning.size(); ++i) - sizes[i] = parallel_partitioning[i].size(); - - this->block_indices.reinit(sizes); - if (this->components.size() != this->n_blocks()) - this->components.resize(this->n_blocks()); - - for (unsigned int i = 0; i < this->n_blocks(); ++i) - block(i).reinit(parallel_partitioning[i], communicator); - } - - inline void - BlockVector::reinit(const std::vector ¶llel_partitioning, - const std::vector &ghost_entries, - const MPI_Comm & communicator) - { - std::vector sizes(parallel_partitioning.size()); - for (unsigned int i = 0; i < parallel_partitioning.size(); ++i) - sizes[i] = parallel_partitioning[i].size(); - - this->block_indices.reinit(sizes); - if (this->components.size() != this->n_blocks()) - this->components.resize(this->n_blocks()); - - for (unsigned int i = 0; i < this->n_blocks(); ++i) - block(i).reinit(parallel_partitioning[i], - ghost_entries[i], - communicator); - } - - - - inline const MPI_Comm & - BlockVector::get_mpi_communicator() const - { - return block(0).get_mpi_communicator(); - } - - inline bool - BlockVector::has_ghost_elements() const - { - bool ghosted = block(0).has_ghost_elements(); -# ifdef DEBUG - for (unsigned int i = 0; i < this->n_blocks(); ++i) - Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError()); -# endif - return ghosted; - } - - - inline void - BlockVector::swap(BlockVector &v) - { - std::swap(this->components, v.components); - - ::dealii::swap(this->block_indices, v.block_indices); - } - - - - inline void - BlockVector::print(std::ostream & out, - const unsigned int precision, - const bool scientific, - const bool across) const - { - for (unsigned int i = 0; i < this->n_blocks(); ++i) - { - if (across) - out << 'C' << i << ':'; - else - out << "Component " << i << std::endl; - this->components[i].print(out, precision, scientific, across); - } - } - - - - /** - * Global function which overloads the default implementation of the C++ - * standard library which uses a temporary object. The function simply - * exchanges the data of the two vectors. 
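As a usage illustration for the IndexSet-based reinit() overloads implemented above, here is a minimal sketch (not part of the patch; the two-block layout, the sizes, and the contiguous ownership are assumptions made for the example):

@code
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_block_vector.h>

using namespace dealii;

// Every process owns a contiguous slice of 100 elements in each of the
// two blocks -- purely illustrative sizes.
const MPI_Comm     comm    = MPI_COMM_WORLD;
const unsigned int rank    = Utilities::MPI::this_mpi_process(comm);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

std::vector<IndexSet> locally_owned(2, IndexSet(100 * n_procs));
for (IndexSet &is : locally_owned)
  is.add_range(100 * rank, 100 * (rank + 1));

PETScWrappers::MPI::BlockVector v(locally_owned, comm);
v = 1.0;                       // set all elements; a collective operation
const double l2 = v.l2_norm(); // collective as well
@endcode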
- * - * @relatesalso PETScWrappers::MPI::BlockVector - * @author Wolfgang Bangerth, 2000 - */ - inline void - swap(BlockVector &u, BlockVector &v) - { - u.swap(v); - } - - } // namespace MPI - -} // namespace PETScWrappers - -namespace internal -{ - namespace LinearOperatorImplementation - { - template - class ReinitHelper; - - /** - * A helper class used internally in linear_operator.h. Specialization for - * PETScWrappers::MPI::BlockVector. - */ - template <> - class ReinitHelper - { - public: - template - static void - reinit_range_vector(const Matrix & matrix, - PETScWrappers::MPI::BlockVector &v, - bool /*omit_zeroing_entries*/) - { - v.reinit(matrix.locally_owned_range_indices(), - matrix.get_mpi_communicator()); - } - - template - static void - reinit_domain_vector(const Matrix & matrix, - PETScWrappers::MPI::BlockVector &v, - bool /*omit_zeroing_entries*/) - { - v.reinit(matrix.locally_owned_domain_indices(), - matrix.get_mpi_communicator()); - } - }; - - } // namespace LinearOperatorImplementation -} /* namespace internal */ - - -/** - * Declare dealii::PETScWrappers::MPI::BlockVector as distributed vector. - * - * @author Uwe Koecher, 2017 - */ -template <> -struct is_serial_vector : std::false_type -{}; - - -DEAL_II_NAMESPACE_CLOSE - -#endif // DEAL_II_WITH_PETSC +#pragma DEAL_II_WARNING( \ + "This file is deprecated. Use deal.II/lac/petsc_block_vector.h instead!") #endif diff --git a/include/deal.II/lac/petsc_parallel_sparse_matrix.h b/include/deal.II/lac/petsc_parallel_sparse_matrix.h index f193d02373..559d6c385c 100644 --- a/include/deal.II/lac/petsc_parallel_sparse_matrix.h +++ b/include/deal.II/lac/petsc_parallel_sparse_matrix.h @@ -18,515 +18,10 @@ # include -# ifdef DEAL_II_WITH_PETSC +# include -# include -# include -# include - -# include - -DEAL_II_NAMESPACE_OPEN - - -// forward declaration -template -class BlockMatrixBase; - - -namespace PETScWrappers -{ - namespace MPI - { - /** - * Implementation of a parallel sparse matrix class based on PETSc, with - * rows of the matrix distributed across an MPI network. All the - * functionality is actually in the base class, except for the calls to - * generate a parallel sparse matrix. This is possible since PETSc only - * works on an abstract matrix type and internally distributes to - * functions that do the actual work depending on the actual matrix type - * (much like using virtual functions). Only the functions creating a - * matrix of specific type differ, and are implemented in this particular - * class. - * - * There are a number of comments on the communication model as well as - * access to individual elements in the documentation to the parallel - * vector class. These comments apply here as well. - * - * - *

Partitioning of matrices

- * - * PETSc partitions parallel matrices so that each MPI process "owns" a - * certain number of rows (i.e. only this process stores the respective - * entries in these rows). The number of rows each process owns has to be - * passed to the constructors and reinit() functions via the argument @p - * local_rows. The individual values passed as @p local_rows on all the - * MPI processes of course have to add up to the global number of rows of - * the matrix. - * - * In addition to this, PETSc also partitions the rectangular chunk of the - * matrix it owns (i.e. the @p local_rows times n() elements in the - * matrix), so that matrix vector multiplications can be performed - * efficiently. This column-partitioning therefore has to match the - * partitioning of the vectors with which the matrix is multiplied, just - * as the row-partitioning has to match the partitioning of destination - * vectors. This partitioning is passed to the constructors and reinit() - * functions through the @p local_columns variable, which again has to add - * up to the global number of columns in the matrix. The name @p - * local_columns may be named inappropriately since it does not reflect - * that only these columns are stored locally, but it reflects the fact - * that these are the columns for which the elements of incoming vectors - * are stored locally. - * - * To make things even more complicated, PETSc needs a very good estimate - * of the number of elements to be stored in each row to be efficient. - * Otherwise it spends most of the time with allocating small chunks of - * memory, a process that can slow down programs to a crawl if it happens - * to often. As if a good estimate of the number of entries per row isn't - * even, it even needs to split this as follows: for each row it owns, it - * needs an estimate for the number of elements in this row that fall into - * the columns that are set apart for this process (see above), and the - * number of elements that are in the rest of the columns. - * - * Since in general this information is not readily available, most of the - * initializing functions of this class assume that all of the number of - * elements you give as an argument to @p n_nonzero_per_row or by @p - * row_lengths fall into the columns "owned" by this process, and none - * into the other ones. This is a fair guess for most of the rows, since - * in a good domain partitioning, nodes only interact with nodes that are - * within the same subdomain. It does not hold for nodes on the interfaces - * of subdomain, however, and for the rows corresponding to these nodes, - * PETSc will have to allocate additional memory, a costly process. - * - * The only way to avoid this is to tell PETSc where the actual entries of - * the matrix will be. For this, there are constructors and reinit() - * functions of this class that take a DynamicSparsityPattern object - * containing all this information. While in the general case it is - * sufficient if the constructors and reinit() functions know the number - * of local rows and columns, the functions getting a sparsity pattern - * also need to know the number of local rows (@p local_rows_per_process) - * and columns (@p local_columns_per_process) for all other processes, in - * order to compute which parts of the matrix are which. Thus, it is not - * sufficient to just count the number of degrees of freedom that belong - * to a particular process, but you have to have the numbers for all - * processes available at all processes. 
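To make the preallocation advice above concrete, a sparsity-pattern-based setup might look like the following sketch; the tridiagonal pattern and the contiguous row ownership are invented stand-ins for what a DoFHandler-based discretization would normally provide:

@code
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>

#include <algorithm>

using namespace dealii;

const MPI_Comm     comm    = MPI_COMM_WORLD;
const unsigned int rank    = Utilities::MPI::this_mpi_process(comm);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

// Each process owns a contiguous block of 10 rows (illustrative).
const types::global_dof_index n = 10 * n_procs;
IndexSet locally_owned(n);
locally_owned.add_range(10 * rank, 10 * (rank + 1));

// Tell PETSc exactly where the nonzero entries will be: here a simple
// tridiagonal pattern.
DynamicSparsityPattern dsp(n, n);
for (const types::global_dof_index i : locally_owned)
  for (types::global_dof_index j = (i == 0 ? 0 : i - 1);
       j <= std::min(i + 1, n - 1);
       ++j)
    dsp.add(i, j);

// With the pattern known up front, no costly re-allocations happen later.
PETScWrappers::MPI::SparseMatrix matrix;
matrix.reinit(locally_owned, locally_owned, dsp, comm);
@endcode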
- * - * @ingroup PETScWrappers - * @ingroup Matrix1 - * @author Wolfgang Bangerth, 2004 - */ - class SparseMatrix : public MatrixBase - { - public: - /** - * Declare type for container size. - */ - using size_type = types::global_dof_index; - - /** - * A structure that describes some of the traits of this class in terms - * of its run-time behavior. Some other classes (such as the block - * matrix classes) that take one or other of the matrix classes as its - * template parameters can tune their behavior based on the variables in - * this class. - */ - struct Traits - { - /** - * It is not safe to elide additions of zeros to individual elements - * of this matrix. The reason is that additions to the matrix may - * trigger collective operations synchronizing buffers on multiple - * processes. If an addition is elided on one process, this may lead - * to other processes hanging in an infinite waiting loop. - */ - static const bool zero_addition_can_be_elided = false; - }; - - /** - * Default constructor. Create an empty matrix. - */ - SparseMatrix(); - - /** - * Destructor to free the PETSc object. - */ - ~SparseMatrix() override; - - /** - * Create a sparse matrix of dimensions @p m times @p n, with an initial - * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row - * nonzero elements per row (see documentation of the MatCreateAIJ PETSc - * function for more information about these parameters). PETSc is able - * to cope with the situation that more than this number of elements are - * later allocated for a row, but this involves copying data, and is - * thus expensive. - * - * For the meaning of the @p local_row and @p local_columns parameters, - * see the class documentation. - * - * The @p is_symmetric flag determines whether we should tell PETSc that - * the matrix is going to be symmetric (as indicated by the call - * MatSetOption(mat, MAT_SYMMETRIC). Note that the PETSc - * documentation states that one cannot form an ILU decomposition of a - * matrix for which this flag has been set to @p true, only an ICC. The - * default value of this flag is @p false. - * - * @deprecated This constructor is deprecated: please use the - * constructor with a sparsity pattern argument instead. - */ - DEAL_II_DEPRECATED - SparseMatrix(const MPI_Comm &communicator, - const size_type m, - const size_type n, - const size_type local_rows, - const size_type local_columns, - const size_type n_nonzero_per_row, - const bool is_symmetric = false, - const size_type n_offdiag_nonzero_per_row = 0); - - /** - * Initialize a rectangular matrix with @p m rows and @p n columns. The - * maximal number of nonzero entries for diagonal and off- diagonal - * blocks of each row is given by the @p row_lengths and @p - * offdiag_row_lengths arrays. - * - * For the meaning of the @p local_row and @p local_columns parameters, - * see the class documentation. - * - * Just as for the other constructors: PETSc is able to cope with the - * situation that more than this number of elements are later allocated - * for a row, but this involves copying data, and is thus expensive. - * - * The @p is_symmetric flag determines whether we should tell PETSc that - * the matrix is going to be symmetric (as indicated by the call - * MatSetOption(mat, MAT_SYMMETRIC). Note that the PETSc - * documentation states that one cannot form an ILU decomposition of a - * matrix for which this flag has been set to @p true, only an ICC. The - * default value of this flag is @p false. 
- * - * @deprecated This constructor is deprecated: please use the - * constructor with a sparsity pattern argument instead. - */ - DEAL_II_DEPRECATED - SparseMatrix(const MPI_Comm & communicator, - const size_type m, - const size_type n, - const size_type local_rows, - const size_type local_columns, - const std::vector &row_lengths, - const bool is_symmetric = false, - const std::vector &offdiag_row_lengths = - std::vector()); - - /** - * Initialize using the given sparsity pattern with communication - * happening over the provided @p communicator. - * - * For the meaning of the @p local_rows_per_process and @p - * local_columns_per_process parameters, see the class documentation. - * - * Note that PETSc can be very slow if you do not provide it with a good - * estimate of the lengths of rows. Using the present function is a very - * efficient way to do this, as it uses the exact number of nonzero - * entries for each row of the matrix by using the given sparsity - * pattern argument. If the @p preset_nonzero_locations flag is @p true, - * this function in addition not only sets the correct row sizes up - * front, but also pre-allocated the correct nonzero entries in the - * matrix. - * - * PETsc allows to later add additional nonzero entries to a matrix, by - * simply writing to these elements. However, this will then lead to - * additional memory allocations which are very inefficient and will - * greatly slow down your program. It is therefore significantly more - * efficient to get memory allocation right from the start. - */ - template - SparseMatrix(const MPI_Comm & communicator, - const SparsityPatternType & sparsity_pattern, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process, - const bool preset_nonzero_locations = true); - - /** - * This operator assigns a scalar to a matrix. Since this does usually - * not make much sense (should we set all matrix entries to this value? - * Only the nonzero entries of the sparsity pattern?), this operation is - * only allowed if the actual value to be assigned is zero. This - * operator only exists to allow for the obvious notation - * matrix=0, which sets all elements of the matrix to zero, but - * keep the sparsity pattern previously used. - */ - SparseMatrix & - operator=(const value_type d); - - - /** - * Make a copy of the PETSc matrix @p other. It is assumed that both - * matrices have the same SparsityPattern. - */ - void - copy_from(const SparseMatrix &other); - - /** - * Throw away the present matrix and generate one that has the same - * properties as if it were created by the constructor of this class - * with the same argument list as the present function. - * - * @deprecated This overload of reinit is deprecated: - * please use the overload with a sparsity pattern argument instead. - */ - DEAL_II_DEPRECATED - void - reinit(const MPI_Comm &communicator, - const size_type m, - const size_type n, - const size_type local_rows, - const size_type local_columns, - const size_type n_nonzero_per_row, - const bool is_symmetric = false, - const size_type n_offdiag_nonzero_per_row = 0); - - /** - * Throw away the present matrix and generate one that has the same - * properties as if it were created by the constructor of this class - * with the same argument list as the present function. - * - * @deprecated This overload of reinit is deprecated: - * please use the overload with a sparsity pattern argument instead. 
- */ - DEAL_II_DEPRECATED - void - reinit(const MPI_Comm & communicator, - const size_type m, - const size_type n, - const size_type local_rows, - const size_type local_columns, - const std::vector &row_lengths, - const bool is_symmetric = false, - const std::vector &offdiag_row_lengths = - std::vector()); - - /** - * Initialize using the given sparsity pattern with communication - * happening over the provided @p communicator. - * - * Note that PETSc can be very slow if you do not provide it with a good - * estimate of the lengths of rows. Using the present function is a very - * efficient way to do this, as it uses the exact number of nonzero - * entries for each row of the matrix by using the given sparsity - * pattern argument. If the @p preset_nonzero_locations flag is @p true, - * this function in addition not only sets the correct row sizes up - * front, but also pre-allocated the correct nonzero entries in the - * matrix. - * - * PETsc allows to later add additional nonzero entries to a matrix, by - * simply writing to these elements. However, this will then lead to - * additional memory allocations which are very inefficient and will - * greatly slow down your program. It is therefore significantly more - * efficient to get memory allocation right from the start. - */ - template - void - reinit(const MPI_Comm & communicator, - const SparsityPatternType & sparsity_pattern, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process, - const bool preset_nonzero_locations = true); - - /** - * Create a matrix where the size() of the IndexSets determine the - * global number of rows and columns and the entries of the IndexSet - * give the rows and columns for the calling processor. Note that only - * ascending, 1:1 IndexSets are supported. - */ - template - void - reinit(const IndexSet & local_rows, - const IndexSet & local_columns, - const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator); - - /** - * Initialize this matrix to have the same structure as @p other. This - * will not copy the values of the other matrix, but you can use - * copy_from() for this. - */ - void - reinit(const SparseMatrix &other); - - /** - * Return a reference to the MPI communicator object in use with this - * matrix. - */ - virtual const MPI_Comm & - get_mpi_communicator() const override; - - /** - * @addtogroup Exceptions - * @{ - */ - /** - * Exception - */ - DeclException2(ExcLocalRowsTooLarge, - int, - int, - << "The number of local rows " << arg1 - << " must be larger than the total number of rows " - << arg2); - //@} - - /** - * Return the square of the norm of the vector $v$ with respect to the - * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is - * useful, e.g. in the finite element context, where the $L_2$ norm of a - * function equals the matrix norm with respect to the mass matrix of - * the vector representing the nodal values of the finite element - * function. - * - * Obviously, the matrix needs to be quadratic for this operation. - * - * The implementation of this function is not as efficient as the one in - * the @p MatrixBase class used in deal.II (i.e. the original one, not - * the PETSc wrapper class) since PETSc doesn't support this operation - * and needs a temporary vector. - */ - PetscScalar - matrix_norm_square(const Vector &v) const; - - /** - * Compute the matrix scalar product $\left(u^\ast,Mv\right)$. 
- * - * The implementation of this function is not as efficient as the one in - * the @p MatrixBase class used in deal.II (i.e. the original one, not - * the PETSc wrapper class) since PETSc doesn't support this operation - * and needs a temporary vector. - */ - PetscScalar - matrix_scalar_product(const Vector &u, const Vector &v) const; - - /** - * Return the partitioning of the domain space of this matrix, i.e., the - * partitioning of the vectors this matrix has to be multiplied with. - */ - IndexSet - locally_owned_domain_indices() const; - - /** - * Return the partitioning of the range space of this matrix, i.e., the - * partitioning of the vectors that result from matrix-vector - * products. - */ - IndexSet - locally_owned_range_indices() const; - - /** - * Perform the matrix-matrix multiplication $C = AB$, or, - * $C = A \text{diag}(V) B$ given a compatible vector $V$. - * - * This function calls MatrixBase::mmult() to do the actual work. - */ - void - mmult(SparseMatrix & C, - const SparseMatrix &B, - const MPI::Vector & V = MPI::Vector()) const; - - /** - * Perform the matrix-matrix multiplication with the transpose of - * this, i.e., $C = A^T B$, or, - * $C = A^T \text{diag}(V) B$ given a compatible vector $V$. - * - * This function calls MatrixBase::Tmmult() to do the actual work. - */ - void - Tmmult(SparseMatrix & C, - const SparseMatrix &B, - const MPI::Vector & V = MPI::Vector()) const; - - private: - /** - * Copy of the communicator object to be used for this parallel vector. - */ - MPI_Comm communicator; - - /** - * Do the actual work for the respective reinit() function and the - * matching constructor, i.e. create a matrix. Getting rid of the - * previous matrix is left to the caller. - * - * @deprecated This overload of do_reinit is deprecated: - * please use the overload with a sparsity pattern argument instead. - */ - DEAL_II_DEPRECATED - void - do_reinit(const size_type m, - const size_type n, - const size_type local_rows, - const size_type local_columns, - const size_type n_nonzero_per_row, - const bool is_symmetric = false, - const size_type n_offdiag_nonzero_per_row = 0); - - /** - * Same as previous function. - * - * @deprecated This overload of do_reinit is deprecated: - * please use the overload with a sparsity pattern argument instead. - */ - DEAL_II_DEPRECATED - void - do_reinit(const size_type m, - const size_type n, - const size_type local_rows, - const size_type local_columns, - const std::vector &row_lengths, - const bool is_symmetric = false, - const std::vector &offdiag_row_lengths = - std::vector()); - - /** - * Same as previous functions. - */ - template - void - do_reinit(const SparsityPatternType & sparsity_pattern, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process, - const bool preset_nonzero_locations); - - /** - * Same as previous functions. - */ - template - void - do_reinit(const IndexSet & local_rows, - const IndexSet & local_columns, - const SparsityPatternType &sparsity_pattern); - - /** - * To allow calling protected prepare_add() and prepare_set(). - */ - friend class BlockMatrixBase; - }; - - - - // -------- template and inline functions ---------- - - inline const MPI_Comm & - SparseMatrix::get_mpi_communicator() const - { - return communicator; - } - } // namespace MPI -} // namespace PETScWrappers - -DEAL_II_NAMESPACE_CLOSE - -# endif // DEAL_II_WITH_PETSC +# pragma DEAL_II_WARNING( \ + "This file is deprecated. 
Use deal.II/lac/petsc_sparse_matrix.h instead.") #endif /*---------------------- petsc_parallel_sparse_matrix.h ---------------------*/ diff --git a/include/deal.II/lac/petsc_parallel_vector.h b/include/deal.II/lac/petsc_parallel_vector.h index 12c4bc2cd6..a238651932 100644 --- a/include/deal.II/lac/petsc_parallel_vector.h +++ b/include/deal.II/lac/petsc_parallel_vector.h @@ -16,558 +16,12 @@ #ifndef dealii_petsc_parallel_vector_h # define dealii_petsc_parallel_vector_h - # include -# ifdef DEAL_II_WITH_PETSC - -# include -# include - -# include -# include -# include -# include -# include - -DEAL_II_NAMESPACE_OPEN - - -/*! @addtogroup PETScWrappers - *@{ - */ -namespace PETScWrappers -{ - /** - * Namespace for PETSc classes that work in parallel over MPI, such as - * distributed vectors and matrices. - * - * @ingroup PETScWrappers - * @author Wolfgang Bangerth, 2004 - */ - namespace MPI - { - /** - * Implementation of a parallel vector class based on PETSC and using MPI - * communication to synchronize distributed operations. All the - * functionality is actually in the base class, except for the calls to - * generate a parallel vector. This is possible since PETSc only works on - * an abstract vector type and internally distributes to functions that do - * the actual work depending on the actual vector type (much like using - * virtual functions). Only the functions creating a vector of specific - * type differ, and are implemented in this particular class. - * - * - *

Parallel communication model

- * - * The parallel functionality of PETSc is built on top of the Message - * Passing Interface (MPI). MPI's communication model is built on - * collective communications: if one process wants something from another, - * that other process has to be willing to accept this communication. A - * process cannot query data from another process by calling a remote - * function, without that other process expecting such a transaction. The - * consequence is that most of the operations in the base class of this - * class have to be called collectively. For example, if you want to - * compute the l2 norm of a parallel vector, @em all processes across - * which this vector is shared have to call the @p l2_norm function. If - * you don't do this, but instead only call the @p l2_norm function on one - * process, then the following happens: This one process will call one of - * the collective MPI functions and wait for all the other processes to - * join in on this. Since the other processes don't call this function, - * you will either get a time-out on the first process, or, worse, by the - * time the next a call to a PETSc function generates an MPI message on - * the other processes, you will get a cryptic message that only a subset - * of processes attempted a communication. These bugs can be very hard to - * figure out, unless you are well-acquainted with the communication model - * of MPI, and know which functions may generate MPI messages. - * - * One particular case, where an MPI message may be generated unexpectedly - * is discussed below. - * - * - *

Accessing individual elements of a vector

- * - * PETSc does allow read access to individual elements of a vector, but in - * the distributed case only to elements that are stored locally. We - * implement this through calls like d=vec(i). However, if you - * access an element outside the locally stored range, an exception is - * generated. - * - * In contrast to read access, PETSc (and the respective deal.II wrapper - * classes) allow to write (or add) to individual elements of vectors, - * even if they are stored on a different process. You can do this - * writing, for example, vec(i)=d or vec(i)+=d, or - * similar operations. There is one catch, however, that may lead to very - * confusing error messages: PETSc requires application programs to call - * the compress() function when they switch from adding, to elements to - * writing to elements. The reasoning is that all processes might - * accumulate addition operations to elements, even if multiple processes - * write to the same elements. By the time we call compress() the next - * time, all these additions are executed. However, if one process adds to - * an element, and another overwrites to it, the order of execution would - * yield non-deterministic behavior if we don't make sure that a - * synchronization with compress() happens in between. - * - * In order to make sure these calls to compress() happen at the - * appropriate time, the deal.II wrappers keep a state variable that store - * which is the presently allowed operation: additions or writes. If it - * encounters an operation of the opposite kind, it calls compress() and - * flips the state. This can sometimes lead to very confusing behavior, in - * code that may for example look like this: - * @code - * PETScWrappers::MPI::Vector vector; - * ... - * // do some write operations on the vector - * for (unsigned int i=0; iv=0;. Presumably, the user wants to set every element of the - * vector to zero, but instead, what happens is this call: - * v=Vector@(0);, i.e. the vector is replaced by one - * of length zero. - */ - explicit Vector(const MPI_Comm &communicator, - const size_type n, - const size_type local_size); - - - /** - * Copy-constructor from deal.II vectors. Sets the dimension to that of - * the given vector, and copies all elements. - * - * @arg local_size denotes the size of the chunk that shall be stored on - * the present process. - * - * @arg communicator denotes the MPI communicator over which the - * different parts of the vector shall communicate - */ - template - explicit Vector(const MPI_Comm & communicator, - const dealii::Vector &v, - const size_type local_size); - - - /** - * Copy-constructor the values from a PETSc wrapper vector class. - * - * @arg local_size denotes the size of the chunk that shall be stored on - * the present process. - * - * @arg communicator denotes the MPI communicator over which the - * different parts of the vector shall communicate - * - * @deprecated The use of objects that are explicitly of type VectorBase - * is deprecated: use PETScWrappers::MPI::Vector instead. - */ - DEAL_II_DEPRECATED - explicit Vector(const MPI_Comm & communicator, - const VectorBase &v, - const size_type local_size); - - /** - * Construct a new parallel ghosted PETSc vector from IndexSets. - * - * Note that @p local must be ascending and 1:1, see - * IndexSet::is_ascending_and_one_to_one(). 
In particular, the DoFs in - * @p local need to be contiguous, meaning you can only create vectors - * from a DoFHandler with several finite element components if they are - * not reordered by component (use a PETScWrappers::BlockVector - * otherwise). The global size of the vector is determined by - * local.size(). The global indices in @p ghost are supplied as ghost - * indices so that they can be read locally. - * - * Note that the @p ghost IndexSet may be empty and that any indices - * already contained in @p local are ignored during construction. That - * way, the ghost parameter can equal the set of locally relevant - * degrees of freedom, see step-32. - * - * @note This operation always creates a ghosted vector, which is considered - * read-only. - * - * @see - * @ref GlossGhostedVector "vectors with ghost elements" - */ - Vector(const IndexSet &local, - const IndexSet &ghost, - const MPI_Comm &communicator); - - /** - * Construct a new parallel PETSc vector without ghost elements from an - * IndexSet. - * - * Note that @p local must be ascending and 1:1, see - * IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in - * @p local need to be contiguous, meaning you can only create vectors - * from a DoFHandler with several finite element components if they are - * not reordered by component (use a PETScWrappers::BlockVector - * otherwise). - */ - explicit Vector(const IndexSet &local, const MPI_Comm &communicator); - - /** - * Release all memory and return to a state just like after having - * called the default constructor. - */ - virtual void - clear() override; - - /** - * Copy the given vector. Resize the present vector if necessary. Also - * take over the MPI communicator of @p v. - */ - Vector & - operator=(const Vector &v); - - /** - * Set all components of the vector to the given number @p s. Simply - * pass this down to the base class, but we still need to declare this - * function to make the example given in the discussion about making the - * constructor explicit work. - */ - Vector & - operator=(const PetscScalar s); - - /** - * Copy the values of a deal.II vector (as opposed to those of the PETSc - * vector wrapper class) into this object. - * - * Contrary to the case of sequential vectors, this operators requires - * that the present vector already has the correct size, since we need - * to have a partition and a communicator present which we otherwise - * can't get from the source vector. - */ - template - Vector & - operator=(const dealii::Vector &v); - - /** - * Change the dimension of the vector to @p N. It is unspecified how - * resizing the vector affects the memory allocation of this object; - * i.e., it is not guaranteed that resizing it to a smaller size - * actually also reduces memory consumption, or if for efficiency the - * same amount of memory is used - * - * @p local_size denotes how many of the @p N values shall be stored - * locally on the present process. for less data. - * - * @p communicator denotes the MPI communicator henceforth to be used - * for this vector. - * - * If @p omit_zeroing_entries is false, the vector is filled by zeros. - * Otherwise, the elements are left an unspecified state. - */ - void - reinit(const MPI_Comm &communicator, - const size_type N, - const size_type local_size, - const bool omit_zeroing_entries = false); - - /** - * Change the dimension to that of the vector @p v, and also take over - * the partitioning into local sizes as well as the MPI communicator. 
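A minimal sketch of the ghosted versus non-ghosted construction described above; the index ranges are assumptions (in an application they would typically be the locally owned and locally relevant DoFs of a DoFHandler):

@code
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_vector.h>

using namespace dealii;

const MPI_Comm     comm    = MPI_COMM_WORLD;
const unsigned int rank    = Utilities::MPI::this_mpi_process(comm);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

const types::global_dof_index n = 100 * n_procs;

// Locally owned range: contiguous, ascending, and 1:1 across processes.
IndexSet locally_owned(n);
locally_owned.add_range(100 * rank, 100 * (rank + 1));

// Ghost entries: here just the first element of the right neighbor
// (an arbitrary choice for the sketch).
IndexSet ghosts(n);
if (rank + 1 < n_procs)
  ghosts.add_index(100 * (rank + 1));

// A writable vector without ghosts, and a read-only ghosted companion.
PETScWrappers::MPI::Vector owned_vec(locally_owned, comm);
PETScWrappers::MPI::Vector ghosted_vec(locally_owned, ghosts, comm);

for (const types::global_dof_index i : locally_owned)
  owned_vec(i) = 1.0 * i;
owned_vec.compress(VectorOperation::insert);

// Assigning the distributed vector also imports the ghost values.
ghosted_vec = owned_vec;
@endcode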
- * The same applies as for the other @p reinit function. - * - * The elements of @p v are not copied, i.e. this function is the same - * as calling reinit(v.size(), v.local_size(), - * omit_zeroing_entries). - */ - void - reinit(const Vector &v, const bool omit_zeroing_entries = false); - - /** - * Reinit as a vector with ghost elements. See the constructor with - * same signature for more details. - * - * @see - * @ref GlossGhostedVector "vectors with ghost elements" - */ - void - reinit(const IndexSet &local, - const IndexSet &ghost, - const MPI_Comm &communicator); - - /** - * Reinit as a vector without ghost elements. See constructor with same - * signature for more details. - * - * @see - * @ref GlossGhostedVector "vectors with ghost elements" - */ - void - reinit(const IndexSet &local, const MPI_Comm &communicator); - - /** - * Return a reference to the MPI communicator object in use with this - * vector. - */ - const MPI_Comm & - get_mpi_communicator() const override; - - /** - * Print to a stream. @p precision denotes the desired precision with - * which values shall be printed, @p scientific whether scientific - * notation shall be used. If @p across is @p true then the vector is - * printed in a line, while if @p false then the elements are printed on - * a separate line each. - * - * @note This function overloads the one in the base class to ensure - * that the right thing happens for parallel vectors that are - * distributed across processors. - */ - void - print(std::ostream & out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * @copydoc PETScWrappers::VectorBase::all_zero() - * - * @note This function overloads the one in the base class to make this - * a collective operation. - */ - bool - all_zero() const; - - protected: - /** - * Create a vector of length @p n. For this class, we create a parallel - * vector. @p n denotes the total size of the vector to be created. @p - * local_size denotes how many of these elements shall be stored - * locally. - */ - virtual void - create_vector(const size_type n, const size_type local_size); - - - - /** - * Create a vector of global length @p n, local size @p local_size and - * with the specified ghost indices. Note that you need to call - * update_ghost_values() before accessing those. - */ - virtual void - create_vector(const size_type n, - const size_type local_size, - const IndexSet &ghostnodes); - - - private: - /** - * Copy of the communicator object to be used for this parallel vector. - */ - MPI_Comm communicator; - }; - - - // ------------------ template and inline functions ------------- - - - /** - * Global function @p swap which overloads the default implementation of - * the C++ standard library which uses a temporary object. The function - * simply exchanges the data of the two vectors. 
- * - * @relatesalso PETScWrappers::MPI::Vector - * @author Wolfgang Bangerth, 2004 - */ - inline void - swap(Vector &u, Vector &v) - { - u.swap(v); - } - - -# ifndef DOXYGEN - - template - Vector::Vector(const MPI_Comm & communicator, - const dealii::Vector &v, - const size_type local_size) - : communicator(communicator) - { - Vector::create_vector(v.size(), local_size); - - *this = v; - } - - - - inline Vector & - Vector::operator=(const PetscScalar s) - { - VectorBase::operator=(s); - - return *this; - } - - - - template - inline Vector & - Vector::operator=(const dealii::Vector &v) - { - Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size())); - - // FIXME: the following isn't necessarily fast, but this is due to - // the fact that PETSc doesn't offer an inlined access operator. - // - // if someone wants to contribute some code: to make this code - // faster, one could either first convert all values to PetscScalar, - // and then set them all at once using VecSetValues. This has the - // drawback that it could take quite some memory, if the vector is - // large, and it would in addition allocate memory on the heap, which - // is expensive. an alternative would be to split the vector into - // chunks of, say, 128 elements, convert a chunk at a time and set it - // in the output vector using VecSetValues. since 128 elements is - // small enough, this could easily be allocated on the stack (as a - // local variable) which would make the whole thing much more - // efficient. - // - // a second way to make things faster is for the special case that - // number==PetscScalar. we could then declare a specialization of - // this template, and omit the conversion. the problem with this is - // that the best we can do is to use VecSetValues, but this isn't - // very efficient either: it wants to see an array of indices, which - // in this case a) again takes up a whole lot of memory on the heap, - // and b) is totally dumb since its content would simply be the - // sequence 0,1,2,3,...,n. the best of all worlds would probably be a - // function in Petsc that would take a pointer to an array of - // PetscScalar values and simply copy n elements verbatim into the - // vector... - for (size_type i = 0; i < v.size(); ++i) - (*this)(i) = v(i); - - compress(::dealii::VectorOperation::insert); - - return *this; - } - - - - inline const MPI_Comm & - Vector::get_mpi_communicator() const - { - return communicator; - } - -# endif // DOXYGEN - } // namespace MPI -} // namespace PETScWrappers - -namespace internal -{ - namespace LinearOperatorImplementation - { - template - class ReinitHelper; - - /** - * A helper class used internally in linear_operator.h. Specialization for - * PETScWrappers::MPI::Vector. - */ - template <> - class ReinitHelper - { - public: - template - static void - reinit_range_vector(const Matrix & matrix, - PETScWrappers::MPI::Vector &v, - bool /*omit_zeroing_entries*/) - { - v.reinit(matrix.locally_owned_range_indices(), - matrix.get_mpi_communicator()); - } - - template - static void - reinit_domain_vector(const Matrix & matrix, - PETScWrappers::MPI::Vector &v, - bool /*omit_zeroing_entries*/) - { - v.reinit(matrix.locally_owned_domain_indices(), - matrix.get_mpi_communicator()); - } - }; - - } // namespace LinearOperatorImplementation -} /* namespace internal */ - -/**@}*/ - - -/** - * Declare dealii::PETScWrappers::MPI::Vector as distributed vector. 
- * - * @author Uwe Koecher, 2017 - */ -template <> -struct is_serial_vector : std::false_type -{}; - - -DEAL_II_NAMESPACE_CLOSE +# include -# endif // DEAL_II_WITH_PETSC +# pragma DEAL_II_WARNING( \ + "This file is deprecated. Use deal.II/lac/petsc_vector.h instead!") #endif /*------------------------- petsc_parallel_vector.h -------------------------*/ diff --git a/include/deal.II/lac/petsc_sparse_matrix.h b/include/deal.II/lac/petsc_sparse_matrix.h index f192d1d3fe..a36670fbcb 100644 --- a/include/deal.II/lac/petsc_sparse_matrix.h +++ b/include/deal.II/lac/petsc_sparse_matrix.h @@ -23,7 +23,7 @@ # include # include -# include +# include # include @@ -284,6 +284,493 @@ namespace PETScWrappers */ friend class BlockMatrixBase; }; + + namespace MPI + { + /** + * Implementation of a parallel sparse matrix class based on PETSc, with + * rows of the matrix distributed across an MPI network. All the + * functionality is actually in the base class, except for the calls to + * generate a parallel sparse matrix. This is possible since PETSc only + * works on an abstract matrix type and internally distributes to + * functions that do the actual work depending on the actual matrix type + * (much like using virtual functions). Only the functions creating a + * matrix of specific type differ, and are implemented in this particular + * class. + * + * There are a number of comments on the communication model as well as + * access to individual elements in the documentation to the parallel + * vector class. These comments apply here as well. + * + * + *

Partitioning of matrices

+ * + * PETSc partitions parallel matrices so that each MPI process "owns" a + * certain number of rows (i.e. only this process stores the respective + * entries in these rows). The number of rows each process owns has to be + * passed to the constructors and reinit() functions via the argument @p + * local_rows. The individual values passed as @p local_rows on all the + * MPI processes of course have to add up to the global number of rows of + * the matrix. + * + * In addition to this, PETSc also partitions the rectangular chunk of the + * matrix it owns (i.e. the @p local_rows times n() elements in the + * matrix), so that matrix vector multiplications can be performed + * efficiently. This column-partitioning therefore has to match the + * partitioning of the vectors with which the matrix is multiplied, just + * as the row-partitioning has to match the partitioning of destination + * vectors. This partitioning is passed to the constructors and reinit() + * functions through the @p local_columns variable, which again has to add + * up to the global number of columns in the matrix. The name @p + * local_columns may be named inappropriately since it does not reflect + * that only these columns are stored locally, but it reflects the fact + * that these are the columns for which the elements of incoming vectors + * are stored locally. + * + * To make things even more complicated, PETSc needs a very good estimate + * of the number of elements to be stored in each row to be efficient. + * Otherwise it spends most of the time with allocating small chunks of + * memory, a process that can slow down programs to a crawl if it happens + * to often. As if a good estimate of the number of entries per row isn't + * even, it even needs to split this as follows: for each row it owns, it + * needs an estimate for the number of elements in this row that fall into + * the columns that are set apart for this process (see above), and the + * number of elements that are in the rest of the columns. + * + * Since in general this information is not readily available, most of the + * initializing functions of this class assume that all of the number of + * elements you give as an argument to @p n_nonzero_per_row or by @p + * row_lengths fall into the columns "owned" by this process, and none + * into the other ones. This is a fair guess for most of the rows, since + * in a good domain partitioning, nodes only interact with nodes that are + * within the same subdomain. It does not hold for nodes on the interfaces + * of subdomain, however, and for the rows corresponding to these nodes, + * PETSc will have to allocate additional memory, a costly process. + * + * The only way to avoid this is to tell PETSc where the actual entries of + * the matrix will be. For this, there are constructors and reinit() + * functions of this class that take a DynamicSparsityPattern object + * containing all this information. While in the general case it is + * sufficient if the constructors and reinit() functions know the number + * of local rows and columns, the functions getting a sparsity pattern + * also need to know the number of local rows (@p local_rows_per_process) + * and columns (@p local_columns_per_process) for all other processes, in + * order to compute which parts of the matrix are which. Thus, it is not + * sufficient to just count the number of degrees of freedom that belong + * to a particular process, but you have to have the numbers for all + * processes available at all processes. 
+ * + * @ingroup PETScWrappers + * @ingroup Matrix1 + * @author Wolfgang Bangerth, 2004 + */ + class SparseMatrix : public MatrixBase + { + public: + /** + * Declare type for container size. + */ + using size_type = types::global_dof_index; + + /** + * A structure that describes some of the traits of this class in terms + * of its run-time behavior. Some other classes (such as the block + * matrix classes) that take one or other of the matrix classes as its + * template parameters can tune their behavior based on the variables in + * this class. + */ + struct Traits + { + /** + * It is not safe to elide additions of zeros to individual elements + * of this matrix. The reason is that additions to the matrix may + * trigger collective operations synchronizing buffers on multiple + * processes. If an addition is elided on one process, this may lead + * to other processes hanging in an infinite waiting loop. + */ + static const bool zero_addition_can_be_elided = false; + }; + + /** + * Default constructor. Create an empty matrix. + */ + SparseMatrix(); + + /** + * Destructor to free the PETSc object. + */ + ~SparseMatrix() override; + + /** + * Create a sparse matrix of dimensions @p m times @p n, with an initial + * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row + * nonzero elements per row (see documentation of the MatCreateAIJ PETSc + * function for more information about these parameters). PETSc is able + * to cope with the situation that more than this number of elements are + * later allocated for a row, but this involves copying data, and is + * thus expensive. + * + * For the meaning of the @p local_row and @p local_columns parameters, + * see the class documentation. + * + * The @p is_symmetric flag determines whether we should tell PETSc that + * the matrix is going to be symmetric (as indicated by the call + * MatSetOption(mat, MAT_SYMMETRIC). Note that the PETSc + * documentation states that one cannot form an ILU decomposition of a + * matrix for which this flag has been set to @p true, only an ICC. The + * default value of this flag is @p false. + * + * @deprecated This constructor is deprecated: please use the + * constructor with a sparsity pattern argument instead. + */ + DEAL_II_DEPRECATED + SparseMatrix(const MPI_Comm &communicator, + const size_type m, + const size_type n, + const size_type local_rows, + const size_type local_columns, + const size_type n_nonzero_per_row, + const bool is_symmetric = false, + const size_type n_offdiag_nonzero_per_row = 0); + + /** + * Initialize a rectangular matrix with @p m rows and @p n columns. The + * maximal number of nonzero entries for diagonal and off- diagonal + * blocks of each row is given by the @p row_lengths and @p + * offdiag_row_lengths arrays. + * + * For the meaning of the @p local_row and @p local_columns parameters, + * see the class documentation. + * + * Just as for the other constructors: PETSc is able to cope with the + * situation that more than this number of elements are later allocated + * for a row, but this involves copying data, and is thus expensive. + * + * The @p is_symmetric flag determines whether we should tell PETSc that + * the matrix is going to be symmetric (as indicated by the call + * MatSetOption(mat, MAT_SYMMETRIC). Note that the PETSc + * documentation states that one cannot form an ILU decomposition of a + * matrix for which this flag has been set to @p true, only an ICC. The + * default value of this flag is @p false. 
+ * + * @deprecated This constructor is deprecated: please use the + * constructor with a sparsity pattern argument instead. + */ + DEAL_II_DEPRECATED + SparseMatrix(const MPI_Comm & communicator, + const size_type m, + const size_type n, + const size_type local_rows, + const size_type local_columns, + const std::vector &row_lengths, + const bool is_symmetric = false, + const std::vector &offdiag_row_lengths = + std::vector()); + + /** + * Initialize using the given sparsity pattern with communication + * happening over the provided @p communicator. + * + * For the meaning of the @p local_rows_per_process and @p + * local_columns_per_process parameters, see the class documentation. + * + * Note that PETSc can be very slow if you do not provide it with a good + * estimate of the lengths of rows. Using the present function is a very + * efficient way to do this, as it uses the exact number of nonzero + * entries for each row of the matrix by using the given sparsity + * pattern argument. If the @p preset_nonzero_locations flag is @p true, + * this function in addition not only sets the correct row sizes up + * front, but also pre-allocated the correct nonzero entries in the + * matrix. + * + * PETsc allows to later add additional nonzero entries to a matrix, by + * simply writing to these elements. However, this will then lead to + * additional memory allocations which are very inefficient and will + * greatly slow down your program. It is therefore significantly more + * efficient to get memory allocation right from the start. + */ + template + SparseMatrix(const MPI_Comm & communicator, + const SparsityPatternType & sparsity_pattern, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process, + const bool preset_nonzero_locations = true); + + /** + * This operator assigns a scalar to a matrix. Since this does usually + * not make much sense (should we set all matrix entries to this value? + * Only the nonzero entries of the sparsity pattern?), this operation is + * only allowed if the actual value to be assigned is zero. This + * operator only exists to allow for the obvious notation + * matrix=0, which sets all elements of the matrix to zero, but + * keep the sparsity pattern previously used. + */ + SparseMatrix & + operator=(const value_type d); + + + /** + * Make a copy of the PETSc matrix @p other. It is assumed that both + * matrices have the same SparsityPattern. + */ + void + copy_from(const SparseMatrix &other); + + /** + * Throw away the present matrix and generate one that has the same + * properties as if it were created by the constructor of this class + * with the same argument list as the present function. + * + * @deprecated This overload of reinit is deprecated: + * please use the overload with a sparsity pattern argument instead. + */ + DEAL_II_DEPRECATED + void + reinit(const MPI_Comm &communicator, + const size_type m, + const size_type n, + const size_type local_rows, + const size_type local_columns, + const size_type n_nonzero_per_row, + const bool is_symmetric = false, + const size_type n_offdiag_nonzero_per_row = 0); + + /** + * Throw away the present matrix and generate one that has the same + * properties as if it were created by the constructor of this class + * with the same argument list as the present function. + * + * @deprecated This overload of reinit is deprecated: + * please use the overload with a sparsity pattern argument instead. 
+ */ + DEAL_II_DEPRECATED + void + reinit(const MPI_Comm & communicator, + const size_type m, + const size_type n, + const size_type local_rows, + const size_type local_columns, + const std::vector &row_lengths, + const bool is_symmetric = false, + const std::vector &offdiag_row_lengths = + std::vector()); + + /** + * Initialize using the given sparsity pattern with communication + * happening over the provided @p communicator. + * + * Note that PETSc can be very slow if you do not provide it with a good + * estimate of the lengths of rows. Using the present function is a very + * efficient way to do this, as it uses the exact number of nonzero + * entries for each row of the matrix by using the given sparsity + * pattern argument. If the @p preset_nonzero_locations flag is @p true, + * this function in addition not only sets the correct row sizes up + * front, but also pre-allocated the correct nonzero entries in the + * matrix. + * + * PETsc allows to later add additional nonzero entries to a matrix, by + * simply writing to these elements. However, this will then lead to + * additional memory allocations which are very inefficient and will + * greatly slow down your program. It is therefore significantly more + * efficient to get memory allocation right from the start. + */ + template + void + reinit(const MPI_Comm & communicator, + const SparsityPatternType & sparsity_pattern, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process, + const bool preset_nonzero_locations = true); + + /** + * Create a matrix where the size() of the IndexSets determine the + * global number of rows and columns and the entries of the IndexSet + * give the rows and columns for the calling processor. Note that only + * ascending, 1:1 IndexSets are supported. + */ + template + void + reinit(const IndexSet & local_rows, + const IndexSet & local_columns, + const SparsityPatternType &sparsity_pattern, + const MPI_Comm & communicator); + + /** + * Initialize this matrix to have the same structure as @p other. This + * will not copy the values of the other matrix, but you can use + * copy_from() for this. + */ + void + reinit(const SparseMatrix &other); + + /** + * Return a reference to the MPI communicator object in use with this + * matrix. + */ + virtual const MPI_Comm & + get_mpi_communicator() const override; + + /** + * @addtogroup Exceptions + * @{ + */ + /** + * Exception + */ + DeclException2(ExcLocalRowsTooLarge, + int, + int, + << "The number of local rows " << arg1 + << " must be larger than the total number of rows " + << arg2); + //@} + + /** + * Return the square of the norm of the vector $v$ with respect to the + * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is + * useful, e.g. in the finite element context, where the $L_2$ norm of a + * function equals the matrix norm with respect to the mass matrix of + * the vector representing the nodal values of the finite element + * function. + * + * Obviously, the matrix needs to be quadratic for this operation. + * + * The implementation of this function is not as efficient as the one in + * the @p MatrixBase class used in deal.II (i.e. the original one, not + * the PETSc wrapper class) since PETSc doesn't support this operation + * and needs a temporary vector. + */ + PetscScalar + matrix_norm_square(const Vector &v) const; + + /** + * Compute the matrix scalar product $\left(u^\ast,Mv\right)$. 
+ * + * The implementation of this function is not as efficient as the one in + * the @p MatrixBase class used in deal.II (i.e. the original one, not + * the PETSc wrapper class) since PETSc doesn't support this operation + * and needs a temporary vector. + */ + PetscScalar + matrix_scalar_product(const Vector &u, const Vector &v) const; + + /** + * Return the partitioning of the domain space of this matrix, i.e., the + * partitioning of the vectors this matrix has to be multiplied with. + */ + IndexSet + locally_owned_domain_indices() const; + + /** + * Return the partitioning of the range space of this matrix, i.e., the + * partitioning of the vectors that result from matrix-vector + * products. + */ + IndexSet + locally_owned_range_indices() const; + + /** + * Perform the matrix-matrix multiplication $C = AB$, or, + * $C = A \text{diag}(V) B$ given a compatible vector $V$. + * + * This function calls MatrixBase::mmult() to do the actual work. + */ + void + mmult(SparseMatrix & C, + const SparseMatrix &B, + const MPI::Vector & V = MPI::Vector()) const; + + /** + * Perform the matrix-matrix multiplication with the transpose of + * this, i.e., $C = A^T B$, or, + * $C = A^T \text{diag}(V) B$ given a compatible vector $V$. + * + * This function calls MatrixBase::Tmmult() to do the actual work. + */ + void + Tmmult(SparseMatrix & C, + const SparseMatrix &B, + const MPI::Vector & V = MPI::Vector()) const; + + private: + /** + * Copy of the communicator object to be used for this parallel vector. + */ + MPI_Comm communicator; + + /** + * Do the actual work for the respective reinit() function and the + * matching constructor, i.e. create a matrix. Getting rid of the + * previous matrix is left to the caller. + * + * @deprecated This overload of do_reinit is deprecated: + * please use the overload with a sparsity pattern argument instead. + */ + DEAL_II_DEPRECATED + void + do_reinit(const size_type m, + const size_type n, + const size_type local_rows, + const size_type local_columns, + const size_type n_nonzero_per_row, + const bool is_symmetric = false, + const size_type n_offdiag_nonzero_per_row = 0); + + /** + * Same as previous function. + * + * @deprecated This overload of do_reinit is deprecated: + * please use the overload with a sparsity pattern argument instead. + */ + DEAL_II_DEPRECATED + void + do_reinit(const size_type m, + const size_type n, + const size_type local_rows, + const size_type local_columns, + const std::vector &row_lengths, + const bool is_symmetric = false, + const std::vector &offdiag_row_lengths = + std::vector()); + + /** + * Same as previous functions. + */ + template + void + do_reinit(const SparsityPatternType & sparsity_pattern, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process, + const bool preset_nonzero_locations); + + /** + * Same as previous functions. + */ + template + void + do_reinit(const IndexSet & local_rows, + const IndexSet & local_columns, + const SparsityPatternType &sparsity_pattern); + + /** + * To allow calling protected prepare_add() and prepare_set(). 
+ */ + friend class BlockMatrixBase; + }; + + + + // -------- template and inline functions ---------- + + inline const MPI_Comm & + SparseMatrix::get_mpi_communicator() const + { + return communicator; + } + } // namespace MPI } // namespace PETScWrappers DEAL_II_NAMESPACE_CLOSE diff --git a/include/deal.II/lac/petsc_vector.h b/include/deal.II/lac/petsc_vector.h new file mode 100644 index 0000000000..1c43760ac1 --- /dev/null +++ b/include/deal.II/lac/petsc_vector.h @@ -0,0 +1,573 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2004 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_petsc_vector_h +# define dealii_petsc_vector_h + + +# include + +# ifdef DEAL_II_WITH_PETSC + +# include +# include + +# include +# include +# include +# include +# include + +DEAL_II_NAMESPACE_OPEN + + +/*! @addtogroup PETScWrappers + *@{ + */ +namespace PETScWrappers +{ + /** + * Namespace for PETSc classes that work in parallel over MPI, such as + * distributed vectors and matrices. + * + * @ingroup PETScWrappers + * @author Wolfgang Bangerth, 2004 + */ + namespace MPI + { + /** + * Implementation of a parallel vector class based on PETSC and using MPI + * communication to synchronize distributed operations. All the + * functionality is actually in the base class, except for the calls to + * generate a parallel vector. This is possible since PETSc only works on + * an abstract vector type and internally distributes to functions that do + * the actual work depending on the actual vector type (much like using + * virtual functions). Only the functions creating a vector of specific + * type differ, and are implemented in this particular class. + * + * + *
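+ * As a minimal sketch (the IndexSet and communicator names below are
+ * placeholders for whatever the application provides), a vector holding
+ * only locally owned elements and a read-only ghosted vector importing
+ * its values can be set up like this:
+ * @code
+ * PETScWrappers::MPI::Vector owned(locally_owned_dofs, mpi_communicator);
+ * PETScWrappers::MPI::Vector ghosted(locally_owned_dofs,
+ *                                    locally_relevant_dofs,
+ *                                    mpi_communicator);
+ * owned   = 1.;    // collectively set all locally owned elements
+ * ghosted = owned; // copy values and update the ghost entries
+ * @endcode
+ *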

+ * <h3>Parallel communication model</h3>

+ * + * The parallel functionality of PETSc is built on top of the Message + * Passing Interface (MPI). MPI's communication model is built on + * collective communications: if one process wants something from another, + * that other process has to be willing to accept this communication. A + * process cannot query data from another process by calling a remote + * function, without that other process expecting such a transaction. The + * consequence is that most of the operations in the base class of this + * class have to be called collectively. For example, if you want to + * compute the l2 norm of a parallel vector, @em all processes across + * which this vector is shared have to call the @p l2_norm function. If + * you don't do this, but instead only call the @p l2_norm function on one + * process, then the following happens: this one process will call one of + * the collective MPI functions and wait for all the other processes to + * join in on this. Since the other processes don't call this function, + * you will either get a time-out on the first process, or, worse, by the + * time the next call to a PETSc function generates an MPI message on + * the other processes, you will get a cryptic message that only a subset + * of processes attempted a communication. These bugs can be very hard to + * figure out, unless you are well-acquainted with the communication model + * of MPI and know which functions may generate MPI messages. + * + * One particular case where an MPI message may be generated unexpectedly + * is discussed below. + * + * + *
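+ * As an illustration of this rule (assuming @p v is a parallel vector
+ * shared across the processes of @p mpi_communicator; the names are
+ * placeholders), a norm computation must be issued on every process:
+ * @code
+ * // correct: executed by all processes that share v
+ * const double norm = v.l2_norm();
+ *
+ * // wrong: only one process calls the collective function; the others
+ * // never join in and the program hangs or aborts
+ * // if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+ * //   std::cout << v.l2_norm() << std::endl;
+ * @endcode
+ *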

+ * <h3>Accessing individual elements of a vector</h3>

+ * + * PETSc does allow read access to individual elements of a vector, but in + * the distributed case only to elements that are stored locally. We + * implement this through calls like d=vec(i). However, if you + * access an element outside the locally stored range, an exception is + * generated. + * + * In contrast to read access, PETSc (and the respective deal.II wrapper + * classes) allow to write (or add) to individual elements of vectors, + * even if they are stored on a different process. You can do this + * writing, for example, vec(i)=d or vec(i)+=d, or + * similar operations. There is one catch, however, that may lead to very + * confusing error messages: PETSc requires application programs to call + * the compress() function when they switch from adding, to elements to + * writing to elements. The reasoning is that all processes might + * accumulate addition operations to elements, even if multiple processes + * write to the same elements. By the time we call compress() the next + * time, all these additions are executed. However, if one process adds to + * an element, and another overwrites to it, the order of execution would + * yield non-deterministic behavior if we don't make sure that a + * synchronization with compress() happens in between. + * + * In order to make sure these calls to compress() happen at the + * appropriate time, the deal.II wrappers keep a state variable that store + * which is the presently allowed operation: additions or writes. If it + * encounters an operation of the opposite kind, it calls compress() and + * flips the state. This can sometimes lead to very confusing behavior, in + * code that may for example look like this: + * @code + * PETScWrappers::MPI::Vector vector; + * ... + * // do some write operations on the vector + * for (unsigned int i=0; iv=0;. Presumably, the user wants to set every element of the + * vector to zero, but instead, what happens is this call: + * v=Vector@(0);, i.e. the vector is replaced by one + * of length zero. + */ + explicit Vector(const MPI_Comm &communicator, + const size_type n, + const size_type local_size); + + + /** + * Copy-constructor from deal.II vectors. Sets the dimension to that of + * the given vector, and copies all elements. + * + * @arg local_size denotes the size of the chunk that shall be stored on + * the present process. + * + * @arg communicator denotes the MPI communicator over which the + * different parts of the vector shall communicate + */ + template + explicit Vector(const MPI_Comm & communicator, + const dealii::Vector &v, + const size_type local_size); + + + /** + * Copy-constructor the values from a PETSc wrapper vector class. + * + * @arg local_size denotes the size of the chunk that shall be stored on + * the present process. + * + * @arg communicator denotes the MPI communicator over which the + * different parts of the vector shall communicate + * + * @deprecated The use of objects that are explicitly of type VectorBase + * is deprecated: use PETScWrappers::MPI::Vector instead. + */ + DEAL_II_DEPRECATED + explicit Vector(const MPI_Comm & communicator, + const VectorBase &v, + const size_type local_size); + + /** + * Construct a new parallel ghosted PETSc vector from IndexSets. + * + * Note that @p local must be ascending and 1:1, see + * IndexSet::is_ascending_and_one_to_one(). 
In particular, the DoFs in + * @p local need to be contiguous, meaning you can only create vectors + * from a DoFHandler with several finite element components if they are + * not reordered by component (use a PETScWrappers::BlockVector + * otherwise). The global size of the vector is determined by + * local.size(). The global indices in @p ghost are supplied as ghost + * indices so that they can be read locally. + * + * Note that the @p ghost IndexSet may be empty and that any indices + * already contained in @p local are ignored during construction. That + * way, the ghost parameter can equal the set of locally relevant + * degrees of freedom, see step-32. + * + * @note This operation always creates a ghosted vector, which is considered + * read-only. + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + Vector(const IndexSet &local, + const IndexSet &ghost, + const MPI_Comm &communicator); + + /** + * Construct a new parallel PETSc vector without ghost elements from an + * IndexSet. + * + * Note that @p local must be ascending and 1:1, see + * IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in + * @p local need to be contiguous, meaning you can only create vectors + * from a DoFHandler with several finite element components if they are + * not reordered by component (use a PETScWrappers::BlockVector + * otherwise). + */ + explicit Vector(const IndexSet &local, const MPI_Comm &communicator); + + /** + * Release all memory and return to a state just like after having + * called the default constructor. + */ + virtual void + clear() override; + + /** + * Copy the given vector. Resize the present vector if necessary. Also + * take over the MPI communicator of @p v. + */ + Vector & + operator=(const Vector &v); + + /** + * Set all components of the vector to the given number @p s. Simply + * pass this down to the base class, but we still need to declare this + * function to make the example given in the discussion about making the + * constructor explicit work. + */ + Vector & + operator=(const PetscScalar s); + + /** + * Copy the values of a deal.II vector (as opposed to those of the PETSc + * vector wrapper class) into this object. + * + * Contrary to the case of sequential vectors, this operator requires + * that the present vector already has the correct size, since we need + * to have a partition and a communicator present which we otherwise + * can't get from the source vector. + */ + template <typename number> + Vector & + operator=(const dealii::Vector<number> &v); + + /** + * Change the dimension of the vector to @p N. It is unspecified how + * resizing the vector affects the memory allocation of this object; + * i.e., it is not guaranteed that resizing it to a smaller size + * actually also reduces memory consumption, or if for efficiency the + * same amount of memory is used for less data. + * + * @p local_size denotes how many of the @p N values shall be stored + * locally on the present process. + * + * @p communicator denotes the MPI communicator henceforth to be used + * for this vector. + * + * If @p omit_zeroing_entries is false, the vector is filled with zeros. + * Otherwise, the elements are left in an unspecified state. + */ + void + reinit(const MPI_Comm &communicator, + const size_type N, + const size_type local_size, + const bool omit_zeroing_entries = false); + + /** + * Change the dimension to that of the vector @p v, and also take over + * the partitioning into local sizes as well as the MPI communicator.
+ * The same applies as for the other @p reinit function. + * + * The elements of @p v are not copied, i.e. this function is the same + * as calling reinit(v.size(), v.local_size(), + * omit_zeroing_entries). + */ + void + reinit(const Vector &v, const bool omit_zeroing_entries = false); + + /** + * Reinit as a vector with ghost elements. See the constructor with + * same signature for more details. + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + void + reinit(const IndexSet &local, + const IndexSet &ghost, + const MPI_Comm &communicator); + + /** + * Reinit as a vector without ghost elements. See constructor with same + * signature for more details. + * + * @see + * @ref GlossGhostedVector "vectors with ghost elements" + */ + void + reinit(const IndexSet &local, const MPI_Comm &communicator); + + /** + * Return a reference to the MPI communicator object in use with this + * vector. + */ + const MPI_Comm & + get_mpi_communicator() const override; + + /** + * Print to a stream. @p precision denotes the desired precision with + * which values shall be printed, @p scientific whether scientific + * notation shall be used. If @p across is @p true then the vector is + * printed in a line, while if @p false then the elements are printed on + * a separate line each. + * + * @note This function overloads the one in the base class to ensure + * that the right thing happens for parallel vectors that are + * distributed across processors. + */ + void + print(std::ostream & out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * @copydoc PETScWrappers::VectorBase::all_zero() + * + * @note This function overloads the one in the base class to make this + * a collective operation. + */ + bool + all_zero() const; + + protected: + /** + * Create a vector of length @p n. For this class, we create a parallel + * vector. @p n denotes the total size of the vector to be created. @p + * local_size denotes how many of these elements shall be stored + * locally. + */ + virtual void + create_vector(const size_type n, const size_type local_size); + + + + /** + * Create a vector of global length @p n, local size @p local_size and + * with the specified ghost indices. Note that you need to call + * update_ghost_values() before accessing those. + */ + virtual void + create_vector(const size_type n, + const size_type local_size, + const IndexSet &ghostnodes); + + + private: + /** + * Copy of the communicator object to be used for this parallel vector. + */ + MPI_Comm communicator; + }; + + + // ------------------ template and inline functions ------------- + + + /** + * Global function @p swap which overloads the default implementation of + * the C++ standard library which uses a temporary object. The function + * simply exchanges the data of the two vectors. 
+ * + * @relatesalso PETScWrappers::MPI::Vector + * @author Wolfgang Bangerth, 2004 + */ + inline void + swap(Vector &u, Vector &v) + { + u.swap(v); + } + + +# ifndef DOXYGEN + + template + Vector::Vector(const MPI_Comm & communicator, + const dealii::Vector &v, + const size_type local_size) + : communicator(communicator) + { + Vector::create_vector(v.size(), local_size); + + *this = v; + } + + + + inline Vector & + Vector::operator=(const PetscScalar s) + { + VectorBase::operator=(s); + + return *this; + } + + + + template + inline Vector & + Vector::operator=(const dealii::Vector &v) + { + Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size())); + + // FIXME: the following isn't necessarily fast, but this is due to + // the fact that PETSc doesn't offer an inlined access operator. + // + // if someone wants to contribute some code: to make this code + // faster, one could either first convert all values to PetscScalar, + // and then set them all at once using VecSetValues. This has the + // drawback that it could take quite some memory, if the vector is + // large, and it would in addition allocate memory on the heap, which + // is expensive. an alternative would be to split the vector into + // chunks of, say, 128 elements, convert a chunk at a time and set it + // in the output vector using VecSetValues. since 128 elements is + // small enough, this could easily be allocated on the stack (as a + // local variable) which would make the whole thing much more + // efficient. + // + // a second way to make things faster is for the special case that + // number==PetscScalar. we could then declare a specialization of + // this template, and omit the conversion. the problem with this is + // that the best we can do is to use VecSetValues, but this isn't + // very efficient either: it wants to see an array of indices, which + // in this case a) again takes up a whole lot of memory on the heap, + // and b) is totally dumb since its content would simply be the + // sequence 0,1,2,3,...,n. the best of all worlds would probably be a + // function in Petsc that would take a pointer to an array of + // PetscScalar values and simply copy n elements verbatim into the + // vector... + for (size_type i = 0; i < v.size(); ++i) + (*this)(i) = v(i); + + compress(::dealii::VectorOperation::insert); + + return *this; + } + + + + inline const MPI_Comm & + Vector::get_mpi_communicator() const + { + return communicator; + } + +# endif // DOXYGEN + } // namespace MPI +} // namespace PETScWrappers + +namespace internal +{ + namespace LinearOperatorImplementation + { + template + class ReinitHelper; + + /** + * A helper class used internally in linear_operator.h. Specialization for + * PETScWrappers::MPI::Vector. + */ + template <> + class ReinitHelper + { + public: + template + static void + reinit_range_vector(const Matrix & matrix, + PETScWrappers::MPI::Vector &v, + bool /*omit_zeroing_entries*/) + { + v.reinit(matrix.locally_owned_range_indices(), + matrix.get_mpi_communicator()); + } + + template + static void + reinit_domain_vector(const Matrix & matrix, + PETScWrappers::MPI::Vector &v, + bool /*omit_zeroing_entries*/) + { + v.reinit(matrix.locally_owned_domain_indices(), + matrix.get_mpi_communicator()); + } + }; + + } // namespace LinearOperatorImplementation +} /* namespace internal */ + +/**@}*/ + + +/** + * Declare dealii::PETScWrappers::MPI::Vector as distributed vector. 
+ * + * @author Uwe Koecher, 2017 + */ +template <> +struct is_serial_vector : std::false_type +{}; + + +DEAL_II_NAMESPACE_CLOSE + +# endif // DEAL_II_WITH_PETSC + +#endif +/*------------------------- petsc_vector.h -------------------------*/ diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h index 7d86d56cae..c5792d876d 100644 --- a/include/deal.II/lac/read_write_vector.templates.h +++ b/include/deal.II/lac/read_write_vector.templates.h @@ -29,7 +29,7 @@ #include #ifdef DEAL_II_WITH_PETSC -# include +# include #endif #ifdef DEAL_II_WITH_TRILINOS diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h index e5f1752785..0faa248c39 100644 --- a/include/deal.II/multigrid/mg_transfer.h +++ b/include/deal.II/multigrid/mg_transfer.h @@ -26,8 +26,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h index da3c695f76..882b6c5745 100644 --- a/include/deal.II/multigrid/mg_transfer.templates.h +++ b/include/deal.II/multigrid/mg_transfer.templates.h @@ -25,7 +25,7 @@ #include -#include +#include #include #include #include diff --git a/include/deal.II/numerics/error_estimator.templates.h b/include/deal.II/numerics/error_estimator.templates.h index 2b8175afc7..f5252acd04 100644 --- a/include/deal.II/numerics/error_estimator.templates.h +++ b/include/deal.II/numerics/error_estimator.templates.h @@ -43,8 +43,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/include/deal.II/numerics/matrix_creator.templates.h b/include/deal.II/numerics/matrix_creator.templates.h index 43addb419b..092507fc84 100644 --- a/include/deal.II/numerics/matrix_creator.templates.h +++ b/include/deal.II/numerics/matrix_creator.templates.h @@ -43,10 +43,9 @@ #include #ifdef DEAL_II_WITH_PETSC -# include -# include -# include +# include # include +# include #endif #ifdef DEAL_II_WITH_TRILINOS diff --git a/include/deal.II/numerics/vector_tools.templates.h b/include/deal.II/numerics/vector_tools.templates.h index d43169b8fd..8980930b52 100644 --- a/include/deal.II/numerics/vector_tools.templates.h +++ b/include/deal.II/numerics/vector_tools.templates.h @@ -62,8 +62,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/include/deal.II/sundials/arkode.h b/include/deal.II/sundials/arkode.h index 983e3c5679..e8fb2b4696 100644 --- a/include/deal.II/sundials/arkode.h +++ b/include/deal.II/sundials/arkode.h @@ -28,8 +28,8 @@ # include # include # ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # endif # include # include diff --git a/include/deal.II/sundials/copy.h b/include/deal.II/sundials/copy.h index 9971910b52..f57d9795b1 100644 --- a/include/deal.II/sundials/copy.h +++ b/include/deal.II/sundials/copy.h @@ -34,8 +34,8 @@ # endif # ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # endif DEAL_II_NAMESPACE_OPEN diff --git a/include/deal.II/sundials/ida.h b/include/deal.II/sundials/ida.h index be8fcb2c61..969c000393 100644 --- a/include/deal.II/sundials/ida.h +++ b/include/deal.II/sundials/ida.h @@ -27,8 +27,8 @@ # include # include # ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # endif # include # include diff --git a/source/algorithms/operator.cc b/source/algorithms/operator.cc index 
715ce2b508..6b86a1a213 100644 --- a/source/algorithms/operator.cc +++ b/source/algorithms/operator.cc @@ -25,8 +25,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/base/mpi.cc b/source/base/mpi.cc index 88ab64ce44..b7fa8393c2 100644 --- a/source/base/mpi.cc +++ b/source/base/mpi.cc @@ -37,8 +37,8 @@ #endif #ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # include #endif diff --git a/source/base/time_stepping.cc b/source/base/time_stepping.cc index a4efc6f7a7..9bb02320eb 100644 --- a/source/base/time_stepping.cc +++ b/source/base/time_stepping.cc @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/distributed/solution_transfer.cc b/source/distributed/solution_transfer.cc index 3782c328de..6fb2288872 100644 --- a/source/distributed/solution_transfer.cc +++ b/source/distributed/solution_transfer.cc @@ -30,8 +30,8 @@ # include # include # include -# include -# include +# include +# include # include # include # include diff --git a/source/dofs/dof_accessor_get.cc b/source/dofs/dof_accessor_get.cc index 6b8aeabdfa..4f1bdc2b03 100644 --- a/source/dofs/dof_accessor_get.cc +++ b/source/dofs/dof_accessor_get.cc @@ -28,8 +28,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/dofs/dof_accessor_set.cc b/source/dofs/dof_accessor_set.cc index 2c63ef8790..556dcd48c8 100644 --- a/source/dofs/dof_accessor_set.cc +++ b/source/dofs/dof_accessor_set.cc @@ -28,8 +28,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/fe/fe_values.cc b/source/fe/fe_values.cc index ffcab279fb..8f17342c52 100644 --- a/source/fe/fe_values.cc +++ b/source/fe/fe_values.cc @@ -36,8 +36,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/fe/mapping_fe_field.cc b/source/fe/mapping_fe_field.cc index 199e98fa18..3419ffc662 100644 --- a/source/fe/mapping_fe_field.cc +++ b/source/fe/mapping_fe_field.cc @@ -41,8 +41,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/fe/mapping_q1_eulerian.cc b/source/fe/mapping_q1_eulerian.cc index d7f10d1427..666a7bef6e 100644 --- a/source/fe/mapping_q1_eulerian.cc +++ b/source/fe/mapping_q1_eulerian.cc @@ -27,8 +27,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/fe/mapping_q_eulerian.cc b/source/fe/mapping_q_eulerian.cc index ac2eb9afd5..c95ebb6981 100644 --- a/source/fe/mapping_q_eulerian.cc +++ b/source/fe/mapping_q_eulerian.cc @@ -31,8 +31,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/lac/block_matrix_array.cc b/source/lac/block_matrix_array.cc index eb2b25086c..fd8408f507 100644 --- a/source/lac/block_matrix_array.cc +++ b/source/lac/block_matrix_array.cc @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/source/lac/petsc_matrix_base.cc b/source/lac/petsc_matrix_base.cc index 8cd7b94b90..7ec261dfd4 100644 --- a/source/lac/petsc_matrix_base.cc +++ b/source/lac/petsc_matrix_base.cc @@ -20,7 +20,6 @@ # include # include # include -# include # include # include diff --git a/source/lac/petsc_parallel_block_sparse_matrix.cc b/source/lac/petsc_parallel_block_sparse_matrix.cc index 
e513192246..da0e508e07 100644 --- a/source/lac/petsc_parallel_block_sparse_matrix.cc +++ b/source/lac/petsc_parallel_block_sparse_matrix.cc @@ -13,7 +13,7 @@ // // --------------------------------------------------------------------- -#include +#include #ifdef DEAL_II_WITH_PETSC diff --git a/source/lac/petsc_parallel_block_vector.cc b/source/lac/petsc_parallel_block_vector.cc index b4e6d9ed5f..11acbe1e1f 100644 --- a/source/lac/petsc_parallel_block_vector.cc +++ b/source/lac/petsc_parallel_block_vector.cc @@ -13,7 +13,7 @@ // // --------------------------------------------------------------------- -#include +#include #ifdef DEAL_II_WITH_PETSC diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc index ae3f970d33..88178237dd 100644 --- a/source/lac/petsc_parallel_sparse_matrix.cc +++ b/source/lac/petsc_parallel_sparse_matrix.cc @@ -13,7 +13,7 @@ // // --------------------------------------------------------------------- -#include +#include #ifdef DEAL_II_WITH_PETSC @@ -22,7 +22,7 @@ # include # include # include -# include +# include # include DEAL_II_NAMESPACE_OPEN diff --git a/source/lac/petsc_parallel_vector.cc b/source/lac/petsc_parallel_vector.cc index 22621dbc7d..96a3e3d1d0 100644 --- a/source/lac/petsc_parallel_vector.cc +++ b/source/lac/petsc_parallel_vector.cc @@ -15,7 +15,7 @@ #include -#include +#include #ifdef DEAL_II_WITH_PETSC diff --git a/source/lac/petsc_vector_base.cc b/source/lac/petsc_vector_base.cc index 1a5b5843f8..7688d61d83 100644 --- a/source/lac/petsc_vector_base.cc +++ b/source/lac/petsc_vector_base.cc @@ -22,7 +22,7 @@ # include # include -# include +# include # include diff --git a/source/lac/solver.cc b/source/lac/solver.cc index 2c9ae3a708..6d432ae184 100644 --- a/source/lac/solver.cc +++ b/source/lac/solver.cc @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/lac/vector_memory.cc b/source/lac/vector_memory.cc index d1b5b74a00..0b5e84c3ae 100644 --- a/source/lac/vector_memory.cc +++ b/source/lac/vector_memory.cc @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/meshworker/mesh_worker_vector_selector.cc b/source/meshworker/mesh_worker_vector_selector.cc index c481784f10..bb7faf41c4 100644 --- a/source/meshworker/mesh_worker_vector_selector.cc +++ b/source/meshworker/mesh_worker_vector_selector.cc @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/multigrid/mg_base.cc b/source/multigrid/mg_base.cc index 0b60cf2f0b..8744c6cf40 100644 --- a/source/multigrid/mg_base.cc +++ b/source/multigrid/mg_base.cc @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/source/multigrid/multigrid.cc b/source/multigrid/multigrid.cc index e98d0da437..af0f49d958 100644 --- a/source/multigrid/multigrid.cc +++ b/source/multigrid/multigrid.cc @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/non_matching/coupling.cc b/source/non_matching/coupling.cc index 23f520c464..380c081a87 100644 --- a/source/non_matching/coupling.cc +++ b/source/non_matching/coupling.cc @@ -27,8 +27,7 @@ #include #include -#include -#include +#include #include #include #include diff --git a/source/numerics/data_out_dof_data.cc b/source/numerics/data_out_dof_data.cc index 9947e83fc8..79b9a37ec8 
100644 --- a/source/numerics/data_out_dof_data.cc +++ b/source/numerics/data_out_dof_data.cc @@ -17,8 +17,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/data_out_dof_data_codim.cc b/source/numerics/data_out_dof_data_codim.cc index a040fd05d1..e085c3d382 100644 --- a/source/numerics/data_out_dof_data_codim.cc +++ b/source/numerics/data_out_dof_data_codim.cc @@ -17,8 +17,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/data_out_dof_data_inst2.cc b/source/numerics/data_out_dof_data_inst2.cc index 39448e6bb2..a2a1311d6a 100644 --- a/source/numerics/data_out_dof_data_inst2.cc +++ b/source/numerics/data_out_dof_data_inst2.cc @@ -17,8 +17,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/derivative_approximation.cc b/source/numerics/derivative_approximation.cc index ac0086e2f4..f4bdf5a237 100644 --- a/source/numerics/derivative_approximation.cc +++ b/source/numerics/derivative_approximation.cc @@ -36,8 +36,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/dof_output_operator.cc b/source/numerics/dof_output_operator.cc index ce7c4375d0..431331c807 100644 --- a/source/numerics/dof_output_operator.cc +++ b/source/numerics/dof_output_operator.cc @@ -21,8 +21,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/error_estimator_1d.cc b/source/numerics/error_estimator_1d.cc index 824dea8568..eef1d7454a 100644 --- a/source/numerics/error_estimator_1d.cc +++ b/source/numerics/error_estimator_1d.cc @@ -39,8 +39,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/fe_field_function.cc b/source/numerics/fe_field_function.cc index 409f59745a..aa996989e7 100644 --- a/source/numerics/fe_field_function.cc +++ b/source/numerics/fe_field_function.cc @@ -23,8 +23,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/matrix_tools.cc b/source/numerics/matrix_tools.cc index 556d6825b8..a5f62cd223 100644 --- a/source/numerics/matrix_tools.cc +++ b/source/numerics/matrix_tools.cc @@ -40,10 +40,9 @@ #include #ifdef DEAL_II_WITH_PETSC -# include -# include -# include +# include # include +# include #endif #ifdef DEAL_II_WITH_TRILINOS diff --git a/source/numerics/matrix_tools_once.cc b/source/numerics/matrix_tools_once.cc index 3f2e565e19..aded4396a4 100644 --- a/source/numerics/matrix_tools_once.cc +++ b/source/numerics/matrix_tools_once.cc @@ -40,9 +40,9 @@ #include #ifdef DEAL_II_WITH_PETSC +# include +# include # include -# include -# include # include #endif diff --git a/source/numerics/point_value_history.cc b/source/numerics/point_value_history.cc index 7055a47f76..d082043b61 100644 --- a/source/numerics/point_value_history.cc +++ b/source/numerics/point_value_history.cc @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/source/numerics/solution_transfer.cc b/source/numerics/solution_transfer.cc index 4acd75edb0..9e77a70d0b 100644 --- a/source/numerics/solution_transfer.cc +++ b/source/numerics/solution_transfer.cc @@ -30,8 +30,8 @@ #include #include #include -#include -#include +#include +#include #include 
#include #include diff --git a/source/sundials/arkode.cc b/source/sundials/arkode.cc index 6a1b6547ee..3f8397e7b6 100644 --- a/source/sundials/arkode.cc +++ b/source/sundials/arkode.cc @@ -28,8 +28,8 @@ # include # endif # ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # endif # include diff --git a/source/sundials/ida.cc b/source/sundials/ida.cc index 4c6902c044..f929cc4c81 100644 --- a/source/sundials/ida.cc +++ b/source/sundials/ida.cc @@ -28,8 +28,8 @@ # include # endif # ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # endif # include diff --git a/source/sundials/kinsol.cc b/source/sundials/kinsol.cc index cc0c1a6646..273bf3a355 100644 --- a/source/sundials/kinsol.cc +++ b/source/sundials/kinsol.cc @@ -28,8 +28,8 @@ # include # endif # ifdef DEAL_II_WITH_PETSC -# include -# include +# include +# include # endif # include diff --git a/tests/arpack/parpack_advection_diffusion_petsc.cc b/tests/arpack/parpack_advection_diffusion_petsc.cc index 73f6381bf5..f66bc9835c 100644 --- a/tests/arpack/parpack_advection_diffusion_petsc.cc +++ b/tests/arpack/parpack_advection_diffusion_petsc.cc @@ -40,10 +40,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include diff --git a/tests/arpack/step-36_parpack.cc b/tests/arpack/step-36_parpack.cc index 3a708d2a21..9a54c1c7ad 100644 --- a/tests/arpack/step-36_parpack.cc +++ b/tests/arpack/step-36_parpack.cc @@ -41,10 +41,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include diff --git a/tests/fe/fe_enriched_step-36.cc b/tests/fe/fe_enriched_step-36.cc index b32b45f361..269bbc042d 100644 --- a/tests/fe/fe_enriched_step-36.cc +++ b/tests/fe/fe_enriched_step-36.cc @@ -40,10 +40,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include diff --git a/tests/fe/fe_enriched_step-36b.cc b/tests/fe/fe_enriched_step-36b.cc index 92b339dbaa..f6d4127345 100644 --- a/tests/fe/fe_enriched_step-36b.cc +++ b/tests/fe/fe_enriched_step-36b.cc @@ -44,10 +44,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include diff --git a/tests/gla/extract_subvector_to.cc b/tests/gla/extract_subvector_to.cc index fabccf83f4..540c1bf755 100644 --- a/tests/gla/extract_subvector_to.cc +++ b/tests/gla/extract_subvector_to.cc @@ -21,8 +21,8 @@ #include #include -#include -#include +#include +#include #include #include diff --git a/tests/hp/hp_constraints_neither_dominate_01.cc b/tests/hp/hp_constraints_neither_dominate_01.cc index 6370f7414d..600c05c04e 100644 --- a/tests/hp/hp_constraints_neither_dominate_01.cc +++ b/tests/hp/hp_constraints_neither_dominate_01.cc @@ -47,10 +47,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/hp/hp_constraints_neither_dominate_02.cc b/tests/hp/hp_constraints_neither_dominate_02.cc index cde6113613..08469d737f 100644 --- a/tests/hp/hp_constraints_neither_dominate_02.cc +++ b/tests/hp/hp_constraints_neither_dominate_02.cc @@ -47,10 +47,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/lac/linear_operator_09.cc b/tests/lac/linear_operator_09.cc index 9a90be2323..362a4865ab 100644 --- a/tests/lac/linear_operator_09.cc +++ b/tests/lac/linear_operator_09.cc @@ -22,13 +22,12 @@ #include "../tests.h" // Vectors: -#include -#include #include +#include // Block Matrix and Vectors: -#include -#include 
+#include +#include using namespace dealii; diff --git a/tests/lac/utilities_01.cc b/tests/lac/utilities_01.cc index d73ee14922..7b719de54a 100644 --- a/tests/lac/utilities_01.cc +++ b/tests/lac/utilities_01.cc @@ -19,8 +19,8 @@ // 3.98974 > 3.95906 > 3.90828 > 3.83792 #include -#include #include +#include #include #include #include diff --git a/tests/lac/vector_reinit_03.cc b/tests/lac/vector_reinit_03.cc index 352108878a..072eab7536 100644 --- a/tests/lac/vector_reinit_03.cc +++ b/tests/lac/vector_reinit_03.cc @@ -21,7 +21,7 @@ #include -#include +#include #include #include "../tests.h" diff --git a/tests/lac/vector_type_traits_is_serial_03.cc b/tests/lac/vector_type_traits_is_serial_03.cc index 70db1b7e2e..37a2f87864 100644 --- a/tests/lac/vector_type_traits_is_serial_03.cc +++ b/tests/lac/vector_type_traits_is_serial_03.cc @@ -17,8 +17,8 @@ // check is_serial_vector type trait -#include -#include +#include +#include #include "../tests.h" diff --git a/tests/mappings/mapping_q_eulerian_07.cc b/tests/mappings/mapping_q_eulerian_07.cc index 8b6047521a..78752a8a51 100644 --- a/tests/mappings/mapping_q_eulerian_07.cc +++ b/tests/mappings/mapping_q_eulerian_07.cc @@ -45,7 +45,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/tests/mappings/mapping_q_eulerian_08.cc b/tests/mappings/mapping_q_eulerian_08.cc index 532fea53f2..8e09f05c72 100644 --- a/tests/mappings/mapping_q_eulerian_08.cc +++ b/tests/mappings/mapping_q_eulerian_08.cc @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/tests/matrix_free/interpolate_to_mg.cc b/tests/matrix_free/interpolate_to_mg.cc index da435b733c..296f3622f1 100644 --- a/tests/matrix_free/interpolate_to_mg.cc +++ b/tests/matrix_free/interpolate_to_mg.cc @@ -45,7 +45,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/tests/mpi/blockvec_01.cc b/tests/mpi/blockvec_01.cc index 89d9d363c3..b7ece7f3d7 100644 --- a/tests/mpi/blockvec_01.cc +++ b/tests/mpi/blockvec_01.cc @@ -19,7 +19,7 @@ #include -#include +#include #include #include diff --git a/tests/mpi/blockvec_02.cc b/tests/mpi/blockvec_02.cc index d56d28744e..aad8571074 100644 --- a/tests/mpi/blockvec_02.cc +++ b/tests/mpi/blockvec_02.cc @@ -20,7 +20,7 @@ #include -#include +#include #include #include diff --git a/tests/mpi/condense_01.cc b/tests/mpi/condense_01.cc index b90088b7a8..a012ef6824 100644 --- a/tests/mpi/condense_01.cc +++ b/tests/mpi/condense_01.cc @@ -32,7 +32,7 @@ #include #include -#include +#include #include "../tests.h" diff --git a/tests/mpi/constraint_matrix_condense_01.cc b/tests/mpi/constraint_matrix_condense_01.cc index 8707c12e20..24e12c66b7 100644 --- a/tests/mpi/constraint_matrix_condense_01.cc +++ b/tests/mpi/constraint_matrix_condense_01.cc @@ -28,7 +28,7 @@ #include -#include +#include #include "../tests.h" diff --git a/tests/mpi/constraint_matrix_set_zero_01.cc b/tests/mpi/constraint_matrix_set_zero_01.cc index 70d83806b2..c90488ab48 100644 --- a/tests/mpi/constraint_matrix_set_zero_01.cc +++ b/tests/mpi/constraint_matrix_set_zero_01.cc @@ -25,8 +25,8 @@ #include -#include -#include +#include +#include #include "../tests.h" diff --git a/tests/mpi/data_out_faces_01.cc b/tests/mpi/data_out_faces_01.cc index d6b98c6e69..708ffa71fc 100644 --- a/tests/mpi/data_out_faces_01.cc +++ b/tests/mpi/data_out_faces_01.cc @@ -34,7 +34,7 @@ #include #include -#include +#include #include #include diff --git a/tests/mpi/fe_tools_extrapolate_02.cc 
b/tests/mpi/fe_tools_extrapolate_02.cc index 10d88078e9..ab1d3e6a29 100644 --- a/tests/mpi/fe_tools_extrapolate_02.cc +++ b/tests/mpi/fe_tools_extrapolate_02.cc @@ -14,7 +14,7 @@ // --------------------------------------------------------------------- -#include +#include #include "fe_tools_extrapolate_common.h" diff --git a/tests/mpi/fe_tools_extrapolate_05.cc b/tests/mpi/fe_tools_extrapolate_05.cc index c0fb066465..00e2b8551e 100644 --- a/tests/mpi/fe_tools_extrapolate_05.cc +++ b/tests/mpi/fe_tools_extrapolate_05.cc @@ -14,7 +14,7 @@ // --------------------------------------------------------------------- -#include +#include #include "fe_tools_extrapolate_common.h" diff --git a/tests/mpi/ghost_01.cc b/tests/mpi/ghost_01.cc index 3e7f166394..26feae884f 100644 --- a/tests/mpi/ghost_01.cc +++ b/tests/mpi/ghost_01.cc @@ -19,7 +19,7 @@ #include -#include +#include #include #include diff --git a/tests/mpi/ghost_02.cc b/tests/mpi/ghost_02.cc index b49ee9e11f..265d673ff2 100644 --- a/tests/mpi/ghost_02.cc +++ b/tests/mpi/ghost_02.cc @@ -19,7 +19,7 @@ #include -#include +#include #include #include diff --git a/tests/mpi/ghost_03.cc b/tests/mpi/ghost_03.cc index 8951a9ee67..067e9e5e34 100644 --- a/tests/mpi/ghost_03.cc +++ b/tests/mpi/ghost_03.cc @@ -19,7 +19,7 @@ #include -#include +#include #include #include diff --git a/tests/mpi/has_hanging_nodes.cc b/tests/mpi/has_hanging_nodes.cc index a53240765c..33f9939162 100644 --- a/tests/mpi/has_hanging_nodes.cc +++ b/tests/mpi/has_hanging_nodes.cc @@ -43,8 +43,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/tests/mpi/hp_step-40.cc b/tests/mpi/hp_step-40.cc index 2e4ebbc9ef..1f5c3954ca 100644 --- a/tests/mpi/hp_step-40.cc +++ b/tests/mpi/hp_step-40.cc @@ -46,10 +46,10 @@ #include #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/mpi/interpolate_02.cc b/tests/mpi/interpolate_02.cc index 32b12d9454..00ddd4dd4c 100644 --- a/tests/mpi/interpolate_02.cc +++ b/tests/mpi/interpolate_02.cc @@ -41,7 +41,7 @@ #include #include -#include +#include #include #include diff --git a/tests/mpi/interpolate_04.cc b/tests/mpi/interpolate_04.cc index dceb5dc597..e6e9592dad 100644 --- a/tests/mpi/interpolate_04.cc +++ b/tests/mpi/interpolate_04.cc @@ -35,7 +35,7 @@ #include #include -#include +#include #include #include diff --git a/tests/mpi/p4est_save_01.cc b/tests/mpi/p4est_save_01.cc index 3d3d5ac8fc..6d68b9d067 100644 --- a/tests/mpi/p4est_save_01.cc +++ b/tests/mpi/p4est_save_01.cc @@ -34,7 +34,7 @@ #include #include -#include +#include #include "../tests.h" diff --git a/tests/mpi/p4est_save_02.cc b/tests/mpi/p4est_save_02.cc index c4a75f7f85..1ab383a4d7 100644 --- a/tests/mpi/p4est_save_02.cc +++ b/tests/mpi/p4est_save_02.cc @@ -34,7 +34,7 @@ #include #include -#include +#include #include "../tests.h" diff --git a/tests/mpi/p4est_save_03.cc b/tests/mpi/p4est_save_03.cc index 3fd41bf6bd..9780e2dfae 100644 --- a/tests/mpi/p4est_save_03.cc +++ b/tests/mpi/p4est_save_03.cc @@ -35,7 +35,7 @@ #include #include -#include +#include #include "../tests.h" diff --git a/tests/mpi/p4est_save_04.cc b/tests/mpi/p4est_save_04.cc index a21bf3c275..c908edf673 100644 --- a/tests/mpi/p4est_save_04.cc +++ b/tests/mpi/p4est_save_04.cc @@ -34,7 +34,7 @@ #include #include -#include +#include #include "../tests.h" diff --git a/tests/mpi/periodicity_01.cc b/tests/mpi/periodicity_01.cc index 81aa942ad4..fc2df4bf55 100644 --- a/tests/mpi/periodicity_01.cc +++ 
b/tests/mpi/periodicity_01.cc @@ -47,10 +47,10 @@ #include #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/mpi/petsc_01.cc b/tests/mpi/petsc_01.cc index 86dd8706e5..1baf79e6a6 100644 --- a/tests/mpi/petsc_01.cc +++ b/tests/mpi/petsc_01.cc @@ -23,7 +23,6 @@ #include #include -#include #include #include "../tests.h" diff --git a/tests/mpi/petsc_02.cc b/tests/mpi/petsc_02.cc index 74f27f666b..383a68433d 100644 --- a/tests/mpi/petsc_02.cc +++ b/tests/mpi/petsc_02.cc @@ -22,7 +22,6 @@ #include #include -#include #include #include "../tests.h" diff --git a/tests/mpi/petsc_03.cc b/tests/mpi/petsc_03.cc index cda031197a..9bd90ba4e6 100644 --- a/tests/mpi/petsc_03.cc +++ b/tests/mpi/petsc_03.cc @@ -20,8 +20,8 @@ #include -#include -#include +#include +#include #include #include "../tests.h" diff --git a/tests/mpi/petsc_bug_ghost_vector_01.cc b/tests/mpi/petsc_bug_ghost_vector_01.cc index 0316ad1611..084fd40d44 100644 --- a/tests/mpi/petsc_bug_ghost_vector_01.cc +++ b/tests/mpi/petsc_bug_ghost_vector_01.cc @@ -59,10 +59,10 @@ #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/mpi/petsc_distribute_01.cc b/tests/mpi/petsc_distribute_01.cc index 5d790792b9..7bbae02484 100644 --- a/tests/mpi/petsc_distribute_01.cc +++ b/tests/mpi/petsc_distribute_01.cc @@ -28,7 +28,7 @@ // x_j's so that we can verify the correctness analytically #include -#include +#include #include diff --git a/tests/mpi/petsc_distribute_01_block.cc b/tests/mpi/petsc_distribute_01_block.cc index cb6010a4cd..3a85ae6210 100644 --- a/tests/mpi/petsc_distribute_01_block.cc +++ b/tests/mpi/petsc_distribute_01_block.cc @@ -22,7 +22,7 @@ // contiguous #include -#include +#include #include diff --git a/tests/mpi/petsc_distribute_01_inhomogenous.cc b/tests/mpi/petsc_distribute_01_inhomogenous.cc index ae8349d2ea..5fd2327fb7 100644 --- a/tests/mpi/petsc_distribute_01_inhomogenous.cc +++ b/tests/mpi/petsc_distribute_01_inhomogenous.cc @@ -20,7 +20,7 @@ // like _01, but with an inhomogeneity #include -#include +#include #include diff --git a/tests/mpi/petsc_locally_owned_elements.cc b/tests/mpi/petsc_locally_owned_elements.cc index 26a3f78dd0..f51f773962 100644 --- a/tests/mpi/petsc_locally_owned_elements.cc +++ b/tests/mpi/petsc_locally_owned_elements.cc @@ -22,7 +22,7 @@ // contiguous #include -#include +#include #include diff --git a/tests/mpi/solution_transfer_01.cc b/tests/mpi/solution_transfer_01.cc index 52c0d7989c..f789e2ecf4 100644 --- a/tests/mpi/solution_transfer_01.cc +++ b/tests/mpi/solution_transfer_01.cc @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include diff --git a/tests/mpi/step-40.cc b/tests/mpi/step-40.cc index e4486a4258..145dc92c04 100644 --- a/tests/mpi/step-40.cc +++ b/tests/mpi/step-40.cc @@ -40,10 +40,10 @@ #include #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/mpi/step-40_cuthill_mckee.cc b/tests/mpi/step-40_cuthill_mckee.cc index 763c8501e8..7d276f508f 100644 --- a/tests/mpi/step-40_cuthill_mckee.cc +++ b/tests/mpi/step-40_cuthill_mckee.cc @@ -45,10 +45,10 @@ #include #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc b/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc index 3f7be85e07..55791689c1 100644 --- a/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc +++ 
b/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc @@ -46,10 +46,10 @@ #include #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/mpi/step-40_direct_solver.cc b/tests/mpi/step-40_direct_solver.cc index f2f4109027..f94e6691eb 100644 --- a/tests/mpi/step-40_direct_solver.cc +++ b/tests/mpi/step-40_direct_solver.cc @@ -40,10 +40,10 @@ #include #include #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/tests/multigrid/transfer_04b.cc b/tests/multigrid/transfer_04b.cc index db52a3ba5d..7fc1e18fa4 100644 --- a/tests/multigrid/transfer_04b.cc +++ b/tests/multigrid/transfer_04b.cc @@ -33,7 +33,7 @@ #include #include -#include +#include #include #include diff --git a/tests/petsc/11.cc b/tests/petsc/11.cc index 20db5d7abe..e0431b836d 100644 --- a/tests/petsc/11.cc +++ b/tests/petsc/11.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::size() -#include +#include #include diff --git a/tests/petsc/12.cc b/tests/petsc/12.cc index e0f9d4f3ca..82627117ca 100644 --- a/tests/petsc/12.cc +++ b/tests/petsc/12.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator() in set-mode -#include +#include #include #include diff --git a/tests/petsc/13.cc b/tests/petsc/13.cc index b3740eeff6..fdb2b84c3f 100644 --- a/tests/petsc/13.cc +++ b/tests/petsc/13.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator() in add-mode -#include +#include #include #include diff --git a/tests/petsc/17.cc b/tests/petsc/17.cc index 32c695f366..efc39d4b95 100644 --- a/tests/petsc/17.cc +++ b/tests/petsc/17.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::l1_norm() -#include +#include #include #include diff --git a/tests/petsc/18.cc b/tests/petsc/18.cc index 0cdd50a547..eddbf15b2e 100644 --- a/tests/petsc/18.cc +++ b/tests/petsc/18.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::l2_norm() -#include +#include #include #include diff --git a/tests/petsc/19.cc b/tests/petsc/19.cc index 8357ccaf6d..5007926fe9 100644 --- a/tests/petsc/19.cc +++ b/tests/petsc/19.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::linfty_norm() -#include +#include #include #include diff --git a/tests/petsc/20.cc b/tests/petsc/20.cc index ab904c01a2..1c5eeccd71 100644 --- a/tests/petsc/20.cc +++ b/tests/petsc/20.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator *= -#include +#include #include #include diff --git a/tests/petsc/21.cc b/tests/petsc/21.cc index c278f70792..3ed97ab29a 100644 --- a/tests/petsc/21.cc +++ b/tests/petsc/21.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator /= -#include +#include #include #include diff --git a/tests/petsc/22.cc b/tests/petsc/22.cc index e40e8b1382..848133c7c7 100644 --- a/tests/petsc/22.cc +++ b/tests/petsc/22.cc @@ -18,7 +18,7 @@ // check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are // orthogonal -#include +#include #include #include diff --git a/tests/petsc/23.cc b/tests/petsc/23.cc index 27f9b94294..e4e8570365 100644 --- a/tests/petsc/23.cc +++ b/tests/petsc/23.cc @@ -18,7 +18,7 @@ // check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are // not orthogonal -#include +#include #include #include diff --git a/tests/petsc/24.cc b/tests/petsc/24.cc index 4508c1268a..8a08e28c0a 100644 --- a/tests/petsc/24.cc +++ b/tests/petsc/24.cc @@ -19,7 +19,7 @@ // this function has since been removed, so we test for v=0 instead, although // that may be covered by one of the other 
tests -#include +#include #include #include diff --git a/tests/petsc/26.cc b/tests/petsc/26.cc index 7cc0bd38f7..82e0773c7c 100644 --- a/tests/petsc/26.cc +++ b/tests/petsc/26.cc @@ -18,7 +18,7 @@ // check PETScWrappers::MPI::Vector::operator = (PetscScalar) with setting to a // nonzero value -#include +#include #include #include diff --git a/tests/petsc/27.cc b/tests/petsc/27.cc index 72089a371d..0be92317e9 100644 --- a/tests/petsc/27.cc +++ b/tests/petsc/27.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator = (Vector) -#include +#include #include #include diff --git a/tests/petsc/28.cc b/tests/petsc/28.cc index 6d7097129d..829a3e1f42 100644 --- a/tests/petsc/28.cc +++ b/tests/petsc/28.cc @@ -18,7 +18,7 @@ // check PETScWrappers::MPI::Vector::operator = (Vector), except that we don't // resize the vector to be copied to beforehand -#include +#include #include #include diff --git a/tests/petsc/29.cc b/tests/petsc/29.cc index 6089f2709d..8b1e588e3e 100644 --- a/tests/petsc/29.cc +++ b/tests/petsc/29.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::reinit(fast) -#include +#include #include #include diff --git a/tests/petsc/30.cc b/tests/petsc/30.cc index dd9b6ee479..4bf77225e6 100644 --- a/tests/petsc/30.cc +++ b/tests/petsc/30.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::reinit(!fast) -#include +#include #include #include diff --git a/tests/petsc/31.cc b/tests/petsc/31.cc index ad0855f698..55150a1364 100644 --- a/tests/petsc/31.cc +++ b/tests/petsc/31.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::l2_norm() -#include +#include #include #include diff --git a/tests/petsc/32.cc b/tests/petsc/32.cc index 5c28123963..3a7be82217 100644 --- a/tests/petsc/32.cc +++ b/tests/petsc/32.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::mean_value() -#include +#include #include #include diff --git a/tests/petsc/33.cc b/tests/petsc/33.cc index 8fd6ee1864..0f48f143b2 100644 --- a/tests/petsc/33.cc +++ b/tests/petsc/33.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::lp_norm(3) -#include +#include #include #include diff --git a/tests/petsc/34.cc b/tests/petsc/34.cc index 3677e3a625..b7fa1d0522 100644 --- a/tests/petsc/34.cc +++ b/tests/petsc/34.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::all_zero -#include +#include #include #include diff --git a/tests/petsc/35.cc b/tests/petsc/35.cc index a01736e4dc..338568706b 100644 --- a/tests/petsc/35.cc +++ b/tests/petsc/35.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator+=(Vector) -#include +#include #include #include diff --git a/tests/petsc/36.cc b/tests/petsc/36.cc index 2c93aceb11..13a437a5fe 100644 --- a/tests/petsc/36.cc +++ b/tests/petsc/36.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator-=(Vector) -#include +#include #include #include diff --git a/tests/petsc/37.cc b/tests/petsc/37.cc index a4a8eafe87..48470ed1ac 100644 --- a/tests/petsc/37.cc +++ b/tests/petsc/37.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::add (scalar) -#include +#include #include #include diff --git a/tests/petsc/39.cc b/tests/petsc/39.cc index 3150d8ce85..e71dff2d7c 100644 --- a/tests/petsc/39.cc +++ b/tests/petsc/39.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::add(scalar, Vector) -#include +#include #include #include diff --git a/tests/petsc/40.cc b/tests/petsc/40.cc index ee3e06d91a..f76bf5960b 100644 --- a/tests/petsc/40.cc +++ b/tests/petsc/40.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::add(s,V,s,V) -#include +#include #include 
#include diff --git a/tests/petsc/41.cc b/tests/petsc/41.cc index 2fdbbb7035..58387bc7ae 100644 --- a/tests/petsc/41.cc +++ b/tests/petsc/41.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::sadd(s, Vector) -#include +#include #include #include diff --git a/tests/petsc/42.cc b/tests/petsc/42.cc index 56aea8764d..6818d293ce 100644 --- a/tests/petsc/42.cc +++ b/tests/petsc/42.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::sadd(scalar, scalar, Vector) -#include +#include #include #include diff --git a/tests/petsc/45.cc b/tests/petsc/45.cc index c22aac23b6..1836acb31e 100644 --- a/tests/petsc/45.cc +++ b/tests/petsc/45.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::scale -#include +#include #include #include diff --git a/tests/petsc/46.cc b/tests/petsc/46.cc index b6d1009993..c7e6d8f500 100644 --- a/tests/petsc/46.cc +++ b/tests/petsc/46.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::equ (s,V) -#include +#include #include #include diff --git a/tests/petsc/48.cc b/tests/petsc/48.cc index 5b3cddebab..45f877e494 100644 --- a/tests/petsc/48.cc +++ b/tests/petsc/48.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::ratio -#include +#include #include #include diff --git a/tests/petsc/49.cc b/tests/petsc/49.cc index 030afa73a2..fc6bd3c702 100644 --- a/tests/petsc/49.cc +++ b/tests/petsc/49.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator = (Vector) -#include +#include #include #include diff --git a/tests/petsc/50.cc b/tests/petsc/50.cc index 1abb9087ea..90050c76b7 100644 --- a/tests/petsc/50.cc +++ b/tests/petsc/50.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator = (Vector) with T!=PetscScalar -#include +#include #include #include diff --git a/tests/petsc/51.cc b/tests/petsc/51.cc index 50073cbfaa..4ce1500e9c 100644 --- a/tests/petsc/51.cc +++ b/tests/petsc/51.cc @@ -17,7 +17,7 @@ // check copy constructor PETScWrappers::MPI::Vector::Vector(Vector) -#include +#include #include #include diff --git a/tests/petsc/55.cc b/tests/petsc/55.cc index 128aa13230..df087c5f19 100644 --- a/tests/petsc/55.cc +++ b/tests/petsc/55.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator() in set, and later in *= mode -#include +#include #include #include diff --git a/tests/petsc/56.cc b/tests/petsc/56.cc index fee054f48c..02bde6c5a7 100644 --- a/tests/petsc/56.cc +++ b/tests/petsc/56.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::operator() in set, and later in /= mode -#include +#include #include #include diff --git a/tests/petsc/57.cc b/tests/petsc/57.cc index 62784ace0f..3f25ddfd94 100644 --- a/tests/petsc/57.cc +++ b/tests/petsc/57.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector::is_non_zero -#include +#include #include #include diff --git a/tests/petsc/58.cc b/tests/petsc/58.cc index b467b4a1c5..b2279ca490 100644 --- a/tests/petsc/58.cc +++ b/tests/petsc/58.cc @@ -17,7 +17,7 @@ // check ::Vector (const PETScWrappers::MPI::Vector &) copy constructor -#include +#include #include #include diff --git a/tests/petsc/59.cc b/tests/petsc/59.cc index de1871bbb5..b6f2fa5dff 100644 --- a/tests/petsc/59.cc +++ b/tests/petsc/59.cc @@ -17,7 +17,7 @@ // check PETScWrappers::MPI::Vector (const ::Vector &) copy constructor -#include +#include #include #include diff --git a/tests/petsc/60.cc b/tests/petsc/60.cc index 8c13eff032..b6ccb9469c 100644 --- a/tests/petsc/60.cc +++ b/tests/petsc/60.cc @@ -17,7 +17,7 @@ // check ::Vector::operator = (const PETScWrappers::MPI::Vector &) -#include +#include #include #include 
diff --git a/tests/petsc/61.cc b/tests/petsc/61.cc
index 8c6dda9822..4046f6a49f 100644
--- a/tests/petsc/61.cc
+++ b/tests/petsc/61.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::operator = (const ::Vector &)
-#include
+#include
#include
#include
diff --git a/tests/petsc/64.cc b/tests/petsc/64.cc
index 0fafe13e7d..bcff03544b 100644
--- a/tests/petsc/64.cc
+++ b/tests/petsc/64.cc
@@ -20,7 +20,6 @@
// PETScWrappers::MatrixBase::operator=
-#include
#include
#include
diff --git a/tests/petsc/65.cc b/tests/petsc/65.cc
index 00c088cdb8..fdeb1a4b5e 100644
--- a/tests/petsc/65.cc
+++ b/tests/petsc/65.cc
@@ -18,7 +18,7 @@
// This test used to fail after upgrading to petsc 2.2.1
-#include
+#include
#include
#include
diff --git a/tests/petsc/70.cc b/tests/petsc/70.cc
index ba6db797be..2cbe3bda3a 100644
--- a/tests/petsc/70.cc
+++ b/tests/petsc/70.cc
@@ -17,7 +17,7 @@
// check PetscScalar
-#include
+#include
#include "../tests.h"
diff --git a/tests/petsc/block_vector_iterator_01.cc b/tests/petsc/block_vector_iterator_01.cc
index 0fd3f884ba..0a1afe2c5a 100644
--- a/tests/petsc/block_vector_iterator_01.cc
+++ b/tests/petsc/block_vector_iterator_01.cc
@@ -17,7 +17,7 @@
// make sure that block vector iterator allows reading and writing correctly
-#include
+#include
#include
diff --git a/tests/petsc/block_vector_iterator_02.cc b/tests/petsc/block_vector_iterator_02.cc
index 04e10315a4..6daecb23dd 100644
--- a/tests/petsc/block_vector_iterator_02.cc
+++ b/tests/petsc/block_vector_iterator_02.cc
@@ -17,7 +17,7 @@
// like _01, except that we use operator[] instead of operator*
-#include
+#include
#include
diff --git a/tests/petsc/block_vector_iterator_03.cc b/tests/petsc/block_vector_iterator_03.cc
index 46d0d80b60..c1ffd2df33 100644
--- a/tests/petsc/block_vector_iterator_03.cc
+++ b/tests/petsc/block_vector_iterator_03.cc
@@ -17,7 +17,7 @@
// this test is an adaptation of lac/block_vector_iterator for PETSc block
// vectors
-#include
+#include
#include
#include
diff --git a/tests/petsc/copy_parallel_vector.cc b/tests/petsc/copy_parallel_vector.cc
index 71f9dc8b40..33a52b5c48 100644
--- a/tests/petsc/copy_parallel_vector.cc
+++ b/tests/petsc/copy_parallel_vector.cc
@@ -19,7 +19,7 @@
#include
#include
-#include
+#include
#include
#include "../tests.h"
diff --git a/tests/petsc/copy_to_dealvec.cc b/tests/petsc/copy_to_dealvec.cc
index c44000a61d..dee7f516de 100644
--- a/tests/petsc/copy_to_dealvec.cc
+++ b/tests/petsc/copy_to_dealvec.cc
@@ -21,7 +21,7 @@
#include
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/copy_to_dealvec_block.cc b/tests/petsc/copy_to_dealvec_block.cc
index cb154d4af8..a079938bad 100644
--- a/tests/petsc/copy_to_dealvec_block.cc
+++ b/tests/petsc/copy_to_dealvec_block.cc
@@ -22,8 +22,8 @@
#include
#include
-#include
-#include
+#include
+#include
#include
#include
diff --git a/tests/petsc/deal_solver_01.cc b/tests/petsc/deal_solver_01.cc
index d11ff12d76..7300bd2fc9 100644
--- a/tests/petsc/deal_solver_01.cc
+++ b/tests/petsc/deal_solver_01.cc
@@ -17,8 +17,8 @@
// test the CG solver using the PETSc matrix and vector classes
-#include
#include
+#include
#include
#include
#include
diff --git a/tests/petsc/deal_solver_02.cc b/tests/petsc/deal_solver_02.cc
index a1e9eebf80..a47da420be 100644
--- a/tests/petsc/deal_solver_02.cc
+++ b/tests/petsc/deal_solver_02.cc
@@ -18,8 +18,8 @@
-#include
#include
+#include
#include
#include
#include
diff --git a/tests/petsc/deal_solver_03.cc b/tests/petsc/deal_solver_03.cc
index 421eb90376..694064beb6 100644
--- a/tests/petsc/deal_solver_03.cc
+++ b/tests/petsc/deal_solver_03.cc
@@ -18,8 +18,8 @@
-#include
#include
+#include
#include
#include
#include
diff --git a/tests/petsc/deal_solver_04.cc b/tests/petsc/deal_solver_04.cc
index f8bc4bfccb..08f8aef33c 100644
--- a/tests/petsc/deal_solver_04.cc
+++ b/tests/petsc/deal_solver_04.cc
@@ -17,8 +17,8 @@
// test the MINRES solver using the PETSc matrix and vector classes
-#include
#include
+#include
#include
#include
#include
diff --git a/tests/petsc/deal_solver_05.cc b/tests/petsc/deal_solver_05.cc
index 2c8e74fdc0..bd62396ed7 100644
--- a/tests/petsc/deal_solver_05.cc
+++ b/tests/petsc/deal_solver_05.cc
@@ -17,8 +17,8 @@
// test the QMRS solver using the PETSc matrix and vector classes
-#include
#include
+#include
#include
#include
#include
diff --git a/tests/petsc/different_matrix_preconditioner.cc b/tests/petsc/different_matrix_preconditioner.cc
index 99ba6650c0..a797d27e77 100644
--- a/tests/petsc/different_matrix_preconditioner.cc
+++ b/tests/petsc/different_matrix_preconditioner.cc
@@ -29,10 +29,10 @@
#include
#include
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_01.cc b/tests/petsc/full_matrix_vector_01.cc
index 471ee105a4..d21f32ae3b 100644
--- a/tests/petsc/full_matrix_vector_01.cc
+++ b/tests/petsc/full_matrix_vector_01.cc
@@ -18,7 +18,7 @@
// check FullMatrix::vmult
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_02.cc b/tests/petsc/full_matrix_vector_02.cc
index 9b3199d64a..db8282c1fd 100644
--- a/tests/petsc/full_matrix_vector_02.cc
+++ b/tests/petsc/full_matrix_vector_02.cc
@@ -18,7 +18,7 @@
// check FullMatrix::Tvmult
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_03.cc b/tests/petsc/full_matrix_vector_03.cc
index e3296ed3a0..01bc509f4d 100644
--- a/tests/petsc/full_matrix_vector_03.cc
+++ b/tests/petsc/full_matrix_vector_03.cc
@@ -18,7 +18,7 @@
// check FullMatrix::vmult_add
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_04.cc b/tests/petsc/full_matrix_vector_04.cc
index d58e78c5dc..a2f831c153 100644
--- a/tests/petsc/full_matrix_vector_04.cc
+++ b/tests/petsc/full_matrix_vector_04.cc
@@ -18,7 +18,7 @@
// check FullMatrix::Tvmult_add
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_05.cc b/tests/petsc/full_matrix_vector_05.cc
index 3df6b22534..416714549c 100644
--- a/tests/petsc/full_matrix_vector_05.cc
+++ b/tests/petsc/full_matrix_vector_05.cc
@@ -18,7 +18,7 @@
// check FullMatrix::matrix_scalar_product
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_06.cc b/tests/petsc/full_matrix_vector_06.cc
index cc885f5e6c..a7d87e31c5 100644
--- a/tests/petsc/full_matrix_vector_06.cc
+++ b/tests/petsc/full_matrix_vector_06.cc
@@ -18,7 +18,7 @@
// check FullMatrix::matrix_norm_square
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/full_matrix_vector_07.cc b/tests/petsc/full_matrix_vector_07.cc
index 7640ebb6a8..26f246a058 100644
--- a/tests/petsc/full_matrix_vector_07.cc
+++ b/tests/petsc/full_matrix_vector_07.cc
@@ -18,7 +18,7 @@
// check FullMatrix::matrix_norm_square
#include
-#include
+#include
#include
#include
diff --git a/tests/petsc/iterate_parallel_01.cc b/tests/petsc/iterate_parallel_01.cc
index b92b8821bf..2698e8234a 100644
--- a/tests/petsc/iterate_parallel_01.cc
+++ b/tests/petsc/iterate_parallel_01.cc
@@ -26,8 +26,8 @@
#include
#include
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/parallel_sparse_matrix_01.cc b/tests/petsc/parallel_sparse_matrix_01.cc
index 3b704e8ae7..ac0f7a4af5 100644
--- a/tests/petsc/parallel_sparse_matrix_01.cc
+++ b/tests/petsc/parallel_sparse_matrix_01.cc
@@ -27,7 +27,7 @@
// malloc calls have been performed
#include
-#include
+#include
#include "../tests.h"
diff --git a/tests/petsc/reinit_preconditioner_01.cc b/tests/petsc/reinit_preconditioner_01.cc
index abbc5c27ef..eab44248ea 100644
--- a/tests/petsc/reinit_preconditioner_01.cc
+++ b/tests/petsc/reinit_preconditioner_01.cc
@@ -19,8 +19,8 @@
#include
#include
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/reinit_preconditioner_02.cc b/tests/petsc/reinit_preconditioner_02.cc
index f1bd19418f..7fd306e239 100644
--- a/tests/petsc/reinit_preconditioner_02.cc
+++ b/tests/petsc/reinit_preconditioner_02.cc
@@ -20,9 +20,9 @@
#include
#include
-#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/slowness_02.cc b/tests/petsc/slowness_02.cc
index 7c5b62a904..2f21b646a3 100644
--- a/tests/petsc/slowness_02.cc
+++ b/tests/petsc/slowness_02.cc
@@ -22,8 +22,8 @@
//
// the tests build the 5-point stencil matrix for a uniform grid of size N*N
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/slowness_03.cc b/tests/petsc/slowness_03.cc
index cb2e0b8626..28fef25dff 100644
--- a/tests/petsc/slowness_03.cc
+++ b/tests/petsc/slowness_03.cc
@@ -22,8 +22,8 @@
//
// the tests build the 5-point stencil matrix for a uniform grid of size N*N
-#include
-#include
+#include
+#include
#include
#include
diff --git a/tests/petsc/slowness_04.cc b/tests/petsc/slowness_04.cc
index c4aa715406..631a1e5c32 100644
--- a/tests/petsc/slowness_04.cc
+++ b/tests/petsc/slowness_04.cc
@@ -28,8 +28,8 @@
// matrix in a consecutive fashion, but rather according to the order of
// degrees of freedom in the sequence of cells that we traverse
-#include
-#include
+#include
+#include
#include
#include
diff --git a/tests/petsc/solver_01.cc b/tests/petsc/solver_01.cc
index 640e410b07..d8d4ca1d58 100644
--- a/tests/petsc/solver_01.cc
+++ b/tests/petsc/solver_01.cc
@@ -15,10 +15,10 @@
// test the PETSc Richardson solver
-#include
#include
#include
#include
+#include
#include "../testmatrix.h"
#include "../tests.h"
diff --git a/tests/petsc/solver_02.cc b/tests/petsc/solver_02.cc
index 4fab023c4f..a8a60f246e 100644
--- a/tests/petsc/solver_02.cc
+++ b/tests/petsc/solver_02.cc
@@ -15,10 +15,10 @@
// test the PETSc Chebychev solver
-#include
#include
#include
#include
+#include
#include "../testmatrix.h"
#include "../tests.h"
diff --git a/tests/petsc/solver_03.cc b/tests/petsc/solver_03.cc
index 73b4b981e5..39b4d6725e 100644
--- a/tests/petsc/solver_03.cc
+++ b/tests/petsc/solver_03.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_mf.cc b/tests/petsc/solver_03_mf.cc
index 4951426d53..8fd0fa7b1f 100644
--- a/tests/petsc/solver_03_mf.cc
+++ b/tests/petsc/solver_03_mf.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver with PETSc MatrixFree class
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_boomeramg.cc b/tests/petsc/solver_03_precondition_boomeramg.cc
index 85052b8236..c8e4379eca 100644
--- a/tests/petsc/solver_03_precondition_boomeramg.cc
+++ b/tests/petsc/solver_03_precondition_boomeramg.cc
@@ -18,10 +18,10 @@
// preconditioner
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_boomeramg_symmetric.cc b/tests/petsc/solver_03_precondition_boomeramg_symmetric.cc
index 729d689df2..daa67a74c9 100644
--- a/tests/petsc/solver_03_precondition_boomeramg_symmetric.cc
+++ b/tests/petsc/solver_03_precondition_boomeramg_symmetric.cc
@@ -19,10 +19,10 @@
// matrix
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_eisenstat.cc b/tests/petsc/solver_03_precondition_eisenstat.cc
index ef474c23a7..318ab358ec 100644
--- a/tests/petsc/solver_03_precondition_eisenstat.cc
+++ b/tests/petsc/solver_03_precondition_eisenstat.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_icc.cc b/tests/petsc/solver_03_precondition_icc.cc
index bdfa8d9f2d..79d415c37c 100644
--- a/tests/petsc/solver_03_precondition_icc.cc
+++ b/tests/petsc/solver_03_precondition_icc.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_ilu.cc b/tests/petsc/solver_03_precondition_ilu.cc
index 29b19068e1..2e1b70f296 100644
--- a/tests/petsc/solver_03_precondition_ilu.cc
+++ b/tests/petsc/solver_03_precondition_ilu.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_lu.cc b/tests/petsc/solver_03_precondition_lu.cc
index 689295dce8..7208b021fc 100644
--- a/tests/petsc/solver_03_precondition_lu.cc
+++ b/tests/petsc/solver_03_precondition_lu.cc
@@ -18,10 +18,10 @@
// preconditioner. This should converge in exactly one iteration
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_parasails.cc b/tests/petsc/solver_03_precondition_parasails.cc
index 770957c885..4ee6b6bd08 100644
--- a/tests/petsc/solver_03_precondition_parasails.cc
+++ b/tests/petsc/solver_03_precondition_parasails.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_sor.cc b/tests/petsc/solver_03_precondition_sor.cc
index 3f0b76ba97..1c10a5670b 100644
--- a/tests/petsc/solver_03_precondition_sor.cc
+++ b/tests/petsc/solver_03_precondition_sor.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_03_precondition_ssor.cc b/tests/petsc/solver_03_precondition_ssor.cc
index 4b944c1dc6..155981cb15 100644
--- a/tests/petsc/solver_03_precondition_ssor.cc
+++ b/tests/petsc/solver_03_precondition_ssor.cc
@@ -17,10 +17,10 @@
// test the PETSc CG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_04.cc b/tests/petsc/solver_04.cc
index cd31ad5f57..5b9b788805 100644
--- a/tests/petsc/solver_04.cc
+++ b/tests/petsc/solver_04.cc
@@ -17,10 +17,10 @@
// test the PETSc BiCG solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_05.cc b/tests/petsc/solver_05.cc
index 9dbabcba58..5b3cd115f9 100644
--- a/tests/petsc/solver_05.cc
+++ b/tests/petsc/solver_05.cc
@@ -17,10 +17,10 @@
// test the PETSc GMRES solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_06.cc b/tests/petsc/solver_06.cc
index 429277918d..e779bb9794 100644
--- a/tests/petsc/solver_06.cc
+++ b/tests/petsc/solver_06.cc
@@ -17,10 +17,10 @@
// test the PETSc Bicgstab solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_07.cc b/tests/petsc/solver_07.cc
index 0fa058f58d..4a113c4382 100644
--- a/tests/petsc/solver_07.cc
+++ b/tests/petsc/solver_07.cc
@@ -17,10 +17,10 @@
// test the PETSc CGS solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_08.cc b/tests/petsc/solver_08.cc
index 96ef8d722c..c39c142b3a 100644
--- a/tests/petsc/solver_08.cc
+++ b/tests/petsc/solver_08.cc
@@ -17,10 +17,10 @@
// test the PETSc TFQMR solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_09.cc b/tests/petsc/solver_09.cc
index 59dc9bbaae..581b3c5237 100644
--- a/tests/petsc/solver_09.cc
+++ b/tests/petsc/solver_09.cc
@@ -21,10 +21,10 @@
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_10.cc b/tests/petsc/solver_10.cc
index 4857fefed5..13aeac5b15 100644
--- a/tests/petsc/solver_10.cc
+++ b/tests/petsc/solver_10.cc
@@ -17,10 +17,10 @@
// test the PETSc CR solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_11.cc b/tests/petsc/solver_11.cc
index b0dda2e417..c8b548caa2 100644
--- a/tests/petsc/solver_11.cc
+++ b/tests/petsc/solver_11.cc
@@ -15,10 +15,10 @@
// test the PETSc LSQR solver
-#include
#include
#include
#include
+#include
#include "../testmatrix.h"
#include "../tests.h"
diff --git a/tests/petsc/solver_12.cc b/tests/petsc/solver_12.cc
index 61057b3fd0..17dfa5bf6d 100644
--- a/tests/petsc/solver_12.cc
+++ b/tests/petsc/solver_12.cc
@@ -17,10 +17,10 @@
// test the PETSc PreOnly solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/solver_13.cc b/tests/petsc/solver_13.cc
index 96cda6f04e..8eef95e116 100644
--- a/tests/petsc/solver_13.cc
+++ b/tests/petsc/solver_13.cc
@@ -17,10 +17,10 @@
// test the PETScWrapper::Precondition*::vmult
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_direct_mumps.cc b/tests/petsc/sparse_direct_mumps.cc
index f6169213d8..9179eff497 100644
--- a/tests/petsc/sparse_direct_mumps.cc
+++ b/tests/petsc/sparse_direct_mumps.cc
@@ -17,10 +17,10 @@
// test the PETSc SparseDirectMumps solver
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_matrix_01.cc b/tests/petsc/sparse_matrix_matrix_01.cc
index f2b74a0e66..bf12fd0147 100644
--- a/tests/petsc/sparse_matrix_matrix_01.cc
+++ b/tests/petsc/sparse_matrix_matrix_01.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::mmult
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_matrix_02.cc b/tests/petsc/sparse_matrix_matrix_02.cc
index dcb10f2442..67d3caba8b 100644
--- a/tests/petsc/sparse_matrix_matrix_02.cc
+++ b/tests/petsc/sparse_matrix_matrix_02.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::Tmmult
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_matrix_03.cc b/tests/petsc/sparse_matrix_matrix_03.cc
index a9568a7bad..c9cc052ae9 100644
--- a/tests/petsc/sparse_matrix_matrix_03.cc
+++ b/tests/petsc/sparse_matrix_matrix_03.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::mmult
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_matrix_04.cc b/tests/petsc/sparse_matrix_matrix_04.cc
index cfb1bf3d94..277f31e83e 100644
--- a/tests/petsc/sparse_matrix_matrix_04.cc
+++ b/tests/petsc/sparse_matrix_matrix_04.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::Tmmult
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_01.cc b/tests/petsc/sparse_matrix_vector_01.cc
index f1bfc0963b..bc4c875094 100644
--- a/tests/petsc/sparse_matrix_vector_01.cc
+++ b/tests/petsc/sparse_matrix_vector_01.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::vmult
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_02.cc b/tests/petsc/sparse_matrix_vector_02.cc
index cd0aa6c6e6..4b5d8a8da3 100644
--- a/tests/petsc/sparse_matrix_vector_02.cc
+++ b/tests/petsc/sparse_matrix_vector_02.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::Tvmult
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_03.cc b/tests/petsc/sparse_matrix_vector_03.cc
index 6515cd28e5..200942799e 100644
--- a/tests/petsc/sparse_matrix_vector_03.cc
+++ b/tests/petsc/sparse_matrix_vector_03.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::vmult_add
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_04.cc b/tests/petsc/sparse_matrix_vector_04.cc
index 465c1427aa..48b9091cb3 100644
--- a/tests/petsc/sparse_matrix_vector_04.cc
+++ b/tests/petsc/sparse_matrix_vector_04.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::Tvmult_add
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_05.cc b/tests/petsc/sparse_matrix_vector_05.cc
index bd49c98c13..cd30d937ce 100644
--- a/tests/petsc/sparse_matrix_vector_05.cc
+++ b/tests/petsc/sparse_matrix_vector_05.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::matrix_scalar_product
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_06.cc b/tests/petsc/sparse_matrix_vector_06.cc
index e0b85d5aed..7deea00e07 100644
--- a/tests/petsc/sparse_matrix_vector_06.cc
+++ b/tests/petsc/sparse_matrix_vector_06.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::matrix_norm_square
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/sparse_matrix_vector_07.cc b/tests/petsc/sparse_matrix_vector_07.cc
index 8a8e554ad3..eac5803993 100644
--- a/tests/petsc/sparse_matrix_vector_07.cc
+++ b/tests/petsc/sparse_matrix_vector_07.cc
@@ -17,8 +17,8 @@
// check SparseMatrix::matrix_norm_square
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc/subtract_mean_value_03.cc b/tests/petsc/subtract_mean_value_03.cc
index 85cafac40c..21f4bee5b0 100644
--- a/tests/petsc/subtract_mean_value_03.cc
+++ b/tests/petsc/subtract_mean_value_03.cc
@@ -17,8 +17,8 @@
// check VectorTools::subtract_mean_value() for PETSc vectors
-#include
-#include
+#include
+#include
#include
diff --git a/tests/petsc/update_ghosts.cc b/tests/petsc/update_ghosts.cc
index 2276f504fa..1103acc879 100644
--- a/tests/petsc/update_ghosts.cc
+++ b/tests/petsc/update_ghosts.cc
@@ -12,7 +12,7 @@
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------
-#include
+#include
#include "../tests.h"
diff --git a/tests/petsc/vector_assign_01.cc b/tests/petsc/vector_assign_01.cc
index 65f57f68c4..ec7d2482c1 100644
--- a/tests/petsc/vector_assign_01.cc
+++ b/tests/petsc/vector_assign_01.cc
@@ -23,7 +23,7 @@
//
// this was fixed 2004-04-05, and this test checks that it works
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_assign_02.cc b/tests/petsc/vector_assign_02.cc
index f53f68e117..20b0cc9f8c 100644
--- a/tests/petsc/vector_assign_02.cc
+++ b/tests/petsc/vector_assign_02.cc
@@ -22,7 +22,7 @@
// argument to the user-defined operator+=. This is not exciting, but since I
// wrote the test to make sure it works this way, let's keep it then...
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_equality_1.cc b/tests/petsc/vector_equality_1.cc
index c6123add50..abc7932ea1 100644
--- a/tests/petsc/vector_equality_1.cc
+++ b/tests/petsc/vector_equality_1.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are not equal
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_equality_2.cc b/tests/petsc/vector_equality_2.cc
index 82f4088d4c..da1e8f568a 100644
--- a/tests/petsc/vector_equality_2.cc
+++ b/tests/petsc/vector_equality_2.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are equal
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_equality_3.cc b/tests/petsc/vector_equality_3.cc
index 933894706a..22018deac5 100644
--- a/tests/petsc/vector_equality_3.cc
+++ b/tests/petsc/vector_equality_3.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator!=(PETScWrappers::MPI::Vector)
// for vectors that are not equal
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_equality_4.cc b/tests/petsc/vector_equality_4.cc
index fc02aacaf1..3ddb59e231 100644
--- a/tests/petsc/vector_equality_4.cc
+++ b/tests/petsc/vector_equality_4.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator!=(PETScWrappers::MPI::Vector)
// for vectors that are equal
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_print.cc b/tests/petsc/vector_print.cc
index f49cfff25b..23c89ef545 100644
--- a/tests/petsc/vector_print.cc
+++ b/tests/petsc/vector_print.cc
@@ -18,7 +18,7 @@
// verify that VectorBase::print uses the precision parameter correctly and
// restores the previous value of the stream precision
-#include
+#include
#include
#include
diff --git a/tests/petsc/vector_wrap_01.cc b/tests/petsc/vector_wrap_01.cc
index 8054a97bde..e3d92e2886 100644
--- a/tests/petsc/vector_wrap_01.cc
+++ b/tests/petsc/vector_wrap_01.cc
@@ -18,7 +18,7 @@
// Test the constructor PETScWrappers::VectorBase(const Vec &) that takes an
// existing PETSc vector.
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/11.cc b/tests/petsc_complex/11.cc
index 32348fd238..20a4b99a02 100644
--- a/tests/petsc_complex/11.cc
+++ b/tests/petsc_complex/11.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::size()
-#include
+#include
#include
diff --git a/tests/petsc_complex/12.cc b/tests/petsc_complex/12.cc
index 42fd257896..5389891c2a 100644
--- a/tests/petsc_complex/12.cc
+++ b/tests/petsc_complex/12.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::operator() in set-mode
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/13.cc b/tests/petsc_complex/13.cc
index a32035b206..0565b06764 100644
--- a/tests/petsc_complex/13.cc
+++ b/tests/petsc_complex/13.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::operator() in add-mode
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/17.cc b/tests/petsc_complex/17.cc
index 1fa076a422..cfc75db823 100644
--- a/tests/petsc_complex/17.cc
+++ b/tests/petsc_complex/17.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::l1_norm()
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/18.cc b/tests/petsc_complex/18.cc
index b416e9dcba..5f258322bc 100644
--- a/tests/petsc_complex/18.cc
+++ b/tests/petsc_complex/18.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::l2_norm()
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/19.cc b/tests/petsc_complex/19.cc
index ff6b49ed51..1e88fe1c64 100644
--- a/tests/petsc_complex/19.cc
+++ b/tests/petsc_complex/19.cc
@@ -17,7 +17,7 @@
// check PETScWrappers::MPI::Vector::linfty_norm()
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/20.cc b/tests/petsc_complex/20.cc
index b9c0b50c86..607a1bc97b 100644
--- a/tests/petsc_complex/20.cc
+++ b/tests/petsc_complex/20.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator*(Vector) on two vectors that are
// not orthogonal
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/assemble_01.cc b/tests/petsc_complex/assemble_01.cc
index ed017c0141..a94ca21541 100644
--- a/tests/petsc_complex/assemble_01.cc
+++ b/tests/petsc_complex/assemble_01.cc
@@ -20,9 +20,8 @@
#include
#include
-#include
-#include
#include
+#include
#include
#include
diff --git a/tests/petsc_complex/element_access_00.cc b/tests/petsc_complex/element_access_00.cc
index aa7097bdd3..f143011875 100644
--- a/tests/petsc_complex/element_access_00.cc
+++ b/tests/petsc_complex/element_access_00.cc
@@ -15,7 +15,7 @@
// deal.II includes
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/fe_get_function_values.cc b/tests/petsc_complex/fe_get_function_values.cc
index a85328170a..583d8dc2ff 100644
--- a/tests/petsc_complex/fe_get_function_values.cc
+++ b/tests/petsc_complex/fe_get_function_values.cc
@@ -42,8 +42,8 @@
#include
#include
#include
-#include
-#include
+#include
+#include
#include
#include "../tests.h"
diff --git a/tests/petsc_complex/parallel_sparse_matrix_01.cc b/tests/petsc_complex/parallel_sparse_matrix_01.cc
index cc55665e0f..ef57ac9d8d 100644
--- a/tests/petsc_complex/parallel_sparse_matrix_01.cc
+++ b/tests/petsc_complex/parallel_sparse_matrix_01.cc
@@ -42,8 +42,8 @@
#include
#include
#include
-#include
-#include
+#include
+#include
#include
#include
diff --git a/tests/petsc_complex/solver_real_01.cc b/tests/petsc_complex/solver_real_01.cc
index 87bd6f0794..3a507cb5a8 100644
--- a/tests/petsc_complex/solver_real_01.cc
+++ b/tests/petsc_complex/solver_real_01.cc
@@ -18,10 +18,10 @@
// Note: This is (almost) a clone of the tests/petsc/solver_01.cc
-#include
#include
#include
#include
+#include
#include "../testmatrix.h"
#include "../tests.h"
diff --git a/tests/petsc_complex/solver_real_02.cc b/tests/petsc_complex/solver_real_02.cc
index a68d9ace47..fc598c3f69 100644
--- a/tests/petsc_complex/solver_real_02.cc
+++ b/tests/petsc_complex/solver_real_02.cc
@@ -18,10 +18,10 @@
// Note: This is (almost) a clone of the tests/petsc/solver_02.cc
-#include
#include
#include
#include
+#include
#include "../testmatrix.h"
#include "../tests.h"
diff --git a/tests/petsc_complex/solver_real_03.cc b/tests/petsc_complex/solver_real_03.cc
index f5b8063d12..12d8a854b2 100644
--- a/tests/petsc_complex/solver_real_03.cc
+++ b/tests/petsc_complex/solver_real_03.cc
@@ -18,10 +18,10 @@
// Note: This is (almost) a clone of the tests/petsc/solver_03.cc
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc_complex/solver_real_03_mf.cc b/tests/petsc_complex/solver_real_03_mf.cc
index ca9bb033bf..56be64ba86 100644
--- a/tests/petsc_complex/solver_real_03_mf.cc
+++ b/tests/petsc_complex/solver_real_03_mf.cc
@@ -32,10 +32,10 @@
// numbers to a possibly
// complex matrix where
// petsc-scalar=complex.
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc_complex/solver_real_04.cc b/tests/petsc_complex/solver_real_04.cc
index 0a8194e48d..9a97f066ef 100644
--- a/tests/petsc_complex/solver_real_04.cc
+++ b/tests/petsc_complex/solver_real_04.cc
@@ -18,10 +18,10 @@
// Note: This is (almost) a clone of the tests/petsc/solver_03.cc
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_02.cc b/tests/petsc_complex/vector_02.cc
index 9bd64b7f58..d330b6deb0 100644
--- a/tests/petsc_complex/vector_02.cc
+++ b/tests/petsc_complex/vector_02.cc
@@ -17,7 +17,7 @@
// check assignment of elements in Vector
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_assign_01.cc b/tests/petsc_complex/vector_assign_01.cc
index c06bb498ac..5a9be5a4ff 100644
--- a/tests/petsc_complex/vector_assign_01.cc
+++ b/tests/petsc_complex/vector_assign_01.cc
@@ -16,7 +16,7 @@
// See notes in petsc/vector_assign_01.cc
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_assign_02.cc b/tests/petsc_complex/vector_assign_02.cc
index d61d9fe50b..e3fc46dd4a 100644
--- a/tests/petsc_complex/vector_assign_02.cc
+++ b/tests/petsc_complex/vector_assign_02.cc
@@ -18,7 +18,7 @@
// this is equivalent to the petsc_parallel_vector_assign_01 test, except that
// we use operator+= instead of operator=. This is also not exciting...
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_equality_1.cc b/tests/petsc_complex/vector_equality_1.cc
index 3f0db1a724..174c391522 100644
--- a/tests/petsc_complex/vector_equality_1.cc
+++ b/tests/petsc_complex/vector_equality_1.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are not equal
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_equality_2.cc b/tests/petsc_complex/vector_equality_2.cc
index 5cb96e5d72..db6bc45f2f 100644
--- a/tests/petsc_complex/vector_equality_2.cc
+++ b/tests/petsc_complex/vector_equality_2.cc
@@ -18,7 +18,7 @@
// check PETScWrappers::MPI::Vector::operator==(PETScWrappers::MPI::Vector)
// for vectors that are equal
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_print.cc b/tests/petsc_complex/vector_print.cc
index 2b0a7782b6..1e87b63a83 100644
--- a/tests/petsc_complex/vector_print.cc
+++ b/tests/petsc_complex/vector_print.cc
@@ -18,7 +18,7 @@
// verify that VectorBase::print uses the precision parameter correctly and
// restores the previous value of the stream precision
-#include
+#include
#include
#include
diff --git a/tests/petsc_complex/vector_wrap_01.cc b/tests/petsc_complex/vector_wrap_01.cc
index b2d3fee1a1..e602ba52c5 100644
--- a/tests/petsc_complex/vector_wrap_01.cc
+++ b/tests/petsc_complex/vector_wrap_01.cc
@@ -18,7 +18,7 @@
// Test the constructor PETScWrappers::VectorBase(const Vec &) that takes an
// existing PETSc vector for complex values.
-#include
+#include
#include
#include
diff --git a/tests/physics/step-18-rotation_matrix.cc b/tests/physics/step-18-rotation_matrix.cc
index e4d458b0f9..b3c5617a8b 100644
--- a/tests/physics/step-18-rotation_matrix.cc
+++ b/tests/physics/step-18-rotation_matrix.cc
@@ -47,10 +47,10 @@
#include
#include
#include
-#include
-#include
#include
#include
+#include
+#include
#include
#include
diff --git a/tests/physics/step-18.cc b/tests/physics/step-18.cc
index 91fa6fd443..8af2d09427 100644
--- a/tests/physics/step-18.cc
+++ b/tests/physics/step-18.cc
@@ -48,10 +48,10 @@
#include
#include
#include
-#include
-#include
#include
#include
+#include
+#include
#include
#include
diff --git a/tests/quick_tests/step-petsc.cc b/tests/quick_tests/step-petsc.cc
index b9a2f90ef8..45a27b7279 100644
--- a/tests/quick_tests/step-petsc.cc
+++ b/tests/quick_tests/step-petsc.cc
@@ -32,10 +32,10 @@
#include
#include
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/quick_tests/step-slepc.cc b/tests/quick_tests/step-slepc.cc
index e07fc5fb91..70e1a55bc9 100644
--- a/tests/quick_tests/step-slepc.cc
+++ b/tests/quick_tests/step-slepc.cc
@@ -32,8 +32,8 @@
#include
#include
-#include
#include
+#include
#include
#include
diff --git a/tests/slepc/solve_01.cc b/tests/slepc/solve_01.cc
index 6f95002b88..666bd208d1 100644
--- a/tests/slepc/solve_01.cc
+++ b/tests/slepc/solve_01.cc
@@ -20,10 +20,10 @@
#include
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/slepc/solve_04.cc b/tests/slepc/solve_04.cc
index 07074ee5e3..233ec8f32f 100644
--- a/tests/slepc/solve_04.cc
+++ b/tests/slepc/solve_04.cc
@@ -20,10 +20,10 @@
#include
-#include
#include
#include
#include
+#include
#include
#include
diff --git a/tests/slepc/step-36_parallel.cc b/tests/slepc/step-36_parallel.cc
index d23651c339..520e6eb0c7 100644
--- a/tests/slepc/step-36_parallel.cc
+++ b/tests/slepc/step-36_parallel.cc
@@ -31,10 +31,10 @@
#include
#include
-#include
-#include
#include
#include
+#include
+#include
#include
#include
#include
diff --git a/tests/slepc/step-36_parallel_02.cc b/tests/slepc/step-36_parallel_02.cc
index ea58c35b16..b58873ff8a 100644
--- a/tests/slepc/step-36_parallel_02.cc
+++ b/tests/slepc/step-36_parallel_02.cc
@@ -31,10 +31,10 @@
#include
#include
-#include
-#include
#include
#include
+#include
+#include
#include
#include
#include
diff --git a/tests/slepc/step-36_parallel_03.cc b/tests/slepc/step-36_parallel_03.cc
index 394a66acdc..cdb2068806 100644
--- a/tests/slepc/step-36_parallel_03.cc
+++ b/tests/slepc/step-36_parallel_03.cc
@@ -31,10 +31,10 @@
#include
#include
-#include
-#include
#include
#include
+#include
+#include
#include
#include
#include
diff --git a/tests/sundials/copy_01.cc b/tests/sundials/copy_01.cc
index 3f9736457e..289ebacabc 100644
--- a/tests/sundials/copy_01.cc
+++ b/tests/sundials/copy_01.cc
@@ -18,7 +18,7 @@
#include
-#include
+#include
#include
-- 
2.39.5
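
Note (illustration only, not part of the patch): for code outside the library the change is mechanical, since only the include paths move while the wrapper class names such as PETScWrappers::MPI::Vector stay the same. A minimal sketch of updated user code, assuming a deal.II build with PETSc enabled and the renamed header <deal.II/lac/petsc_vector.h>; nothing below is prescribed by the patch itself:

// Sketch: include path after the petsc_parallel_* -> petsc_* rename.
// Previously this was <deal.II/lac/petsc_parallel_vector.h>.
#include <deal.II/lac/petsc_vector.h>

// The class itself is untouched by the rename; only the header path differs.
using ParallelVector = dealii::PETScWrappers::MPI::Vector;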