--- /dev/null
+New: A LinearOperator Payload class supporting Trilinos sparse matrices and
+preconditioners has been developed. LinearOperators, and their associated
+functionality, have thus been extended so that they can now be used with
+Trilinos algebra types.
+<br>
+(Jean-Paul Pelteret, Matthias Maier, 2017/01/04)
#ifndef dealii__linear_operator_tools_h
#define dealii__linear_operator_tools_h
-// many usage cases lead to a combination of LinearOperator and
-// PackagedOperation. To ease the pain of reading compilation errors, just include
-// all headers we ever need to use LO and friends in one place:
+// Many usage cases lead to a combination of LinearOperator and
+// PackagedOperation. To ease the pain of reading compilation errors, just
+// include all headers we ever need to use LO and friends in one place:
+
#include <deal.II/lac/linear_operator.h>
#include <deal.II/lac/block_linear_operator.h>
+#include <deal.II/lac/trilinos_linear_operator.h>
+
#include <deal.II/lac/packaged_operation.h>
+
#include <deal.II/lac/constrained_linear_operator.h>
#include <deal.II/lac/schur_complement.h>
BaseClass::vmult_nonblock_nonblock (dst, src);
}
-}
+
+#ifdef DEAL_II_WITH_CXX11
+
+
+ namespace internal
+ {
+ namespace BlockLinearOperator
+ {
+
+ /**
+ * This is an extension class to BlockLinearOperators for Trilinos
+ * block sparse matrices.
+ *
+       * @note This class does very little at the moment other than to check
+       * that the correct Payload type for each subblock has been chosen.
+       * Further extensions to the class may be necessary in the future in
+       * order to add further functionality to BlockLinearOperators while
+       * retaining compatibility with the Trilinos sparse matrix and
+       * preconditioner classes.
+ *
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template<typename PayloadBlockType>
+ class TrilinosBlockPayload
+ {
+ public:
+ /**
+ * Type of payload held by each subblock
+ */
+ typedef PayloadBlockType BlockType;
+
+ /**
+ * Default constructor
+ *
+ * This simply checks that the payload for each block has been chosen
+         * correctly (i.e., that it is of type TrilinosPayload). Apart from this,
+         * the class does not do anything in particular and needs no special
+         * configuration; there is only one generic constructor that can be
+ * called under any conditions.
+ */
+ template <typename... Args>
+ TrilinosBlockPayload (const Args &...)
+ {
+ static_assert(typeid(PayloadBlockType)==typeid(internal::LinearOperator::TrilinosPayload),
+ "TrilinosBlockPayload can only accept a payload of type TrilinosPayload.");
+ }
+ };
+
+ } /*namespace BlockLinearOperator*/
+ } /* namespace internal */
+
+}/* namespace TrilinosWrappers */
+
+
+#endif // DEAL_II_WITH_CXX11
DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__trilinos_linear_operator_h
+#define dealii__trilinos_linear_operator_h
+
+#include <deal.II/base/config.h>
+
+#if defined(DEAL_II_WITH_CXX11) && defined(DEAL_II_WITH_TRILINOS)
+
+#include <deal.II/lac/linear_operator.h>
+#include <deal.II/lac/block_linear_operator.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace TrilinosWrappers
+{
+
+ // Forward declarations:
+ class SparseMatrix;
+ class PreconditionBase;
+ class BlockSparseMatrix;
+
+ namespace internal
+ {
+ namespace LinearOperator
+ {
+ class TrilinosPayload;
+ }
+
+ namespace BlockLinearOperator
+ {
+ template<typename PayloadBlockType>
+ class TrilinosBlockPayload;
+ }
+ }
+
+
+ /**
+ * @name Creation of a LinearOperator
+ */
+//@{
+
+
+ /**
+ * @relates LinearOperator
+ *
+ * Return a LinearOperator that is the identity of the vector space @p Range.
+ *
+ * This function is the equivalent of the dealii::identity_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
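+   * A minimal usage sketch (the matrix <tt>A</tt>, the reinit lambda, and the
+   * chosen vector type are illustrative assumptions, not part of this
+   * interface):
+   * @code
+   *   typedef TrilinosWrappers::MPI::Vector VectorType;
+   *   const auto id_op = TrilinosWrappers::identity_operator<VectorType>(
+   *     [&A](VectorType &v, const bool omit_zeroing_entries)
+   *     {
+   *       v.reinit(A.locally_owned_range_indices(),
+   *                A.get_mpi_communicator(),
+   *                omit_zeroing_entries);
+   *     });
+   * @endcode
+   *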
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <typename Range>
+ inline LinearOperator<Range, Range, TrilinosWrappers::internal::LinearOperator::TrilinosPayload>
+ identity_operator(const std::function<void(Range &, bool)> &reinit_vector)
+ {
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload Payload;
+ return dealii::identity_operator<Range, Payload>(reinit_vector);
+ }
+
+
+ /**
+ * @relates LinearOperator
+ *
+ * A function that encapsulates generic @p matrix objects, based on an
+ * @p operator_exemplar, that act on a compatible Vector type into a
+ * LinearOperator.
+ *
+ * This function is the equivalent of the dealii::linear_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <typename Range, typename Domain = Range,
+ typename Matrix>
+ inline LinearOperator<Range, Domain, TrilinosWrappers::internal::LinearOperator::TrilinosPayload>
+ linear_operator(const TrilinosWrappers::SparseMatrix &operator_exemplar, const Matrix &matrix)
+ {
+ typedef TrilinosWrappers::SparseMatrix OperatorExemplar;
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload Payload;
+ return dealii::linear_operator<Range, Domain, Payload, OperatorExemplar, Matrix>(operator_exemplar, matrix);
+ }
+
+
+ /**
+ * @relates LinearOperator
+ *
+ * A function that encapsulates generic @p matrix objects that act on a
+ * compatible Vector type into a LinearOperator.
+ *
+ * This function is the equivalent of the dealii::linear_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
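+   * A minimal usage sketch, assuming a previously assembled matrix (the names
+   * <tt>A</tt> and <tt>VectorType</tt> are illustrative):
+   * @code
+   *   TrilinosWrappers::SparseMatrix A;
+   *   // ... assemble A ...
+   *   typedef TrilinosWrappers::MPI::Vector VectorType;
+   *   const auto lo_A = TrilinosWrappers::linear_operator<VectorType>(A);
+   *   // lo_A can now be composed with other operators, inverted, or applied
+   *   // to Trilinos vectors.
+   * @endcode
+   *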
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <typename Range, typename Domain = Range>
+ inline LinearOperator<Range, Domain, TrilinosWrappers::internal::LinearOperator::TrilinosPayload>
+ linear_operator(const TrilinosWrappers::SparseMatrix &matrix)
+ {
+ typedef TrilinosWrappers::SparseMatrix Matrix;
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload Payload;
+ return dealii::linear_operator<Range, Domain, Payload, Matrix, Matrix>(matrix, matrix);
+ }
+
+
+//@}
+ /**
+ * @name Creation of a BlockLinearOperator
+ */
+//@{
+
+
+ /**
+ * @relates BlockLinearOperator
+ *
+ * A function that encapsulates a @p block_matrix into a BlockLinearOperator.
+ *
+ * This function is the equivalent of the dealii::block_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
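+   * A minimal usage sketch, assuming a previously assembled block matrix (the
+   * names <tt>B</tt> and <tt>BlockVectorType</tt> are illustrative):
+   * @code
+   *   TrilinosWrappers::BlockSparseMatrix B;
+   *   // ... assemble B ...
+   *   typedef TrilinosWrappers::MPI::BlockVector BlockVectorType;
+   *   const auto lo_B = TrilinosWrappers::block_operator<BlockVectorType>(B);
+   * @endcode
+   *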
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <typename Range,
+ typename Domain = Range>
+ inline BlockLinearOperator<Range, Domain, TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<TrilinosWrappers::internal::LinearOperator::TrilinosPayload> >
+ block_operator(const TrilinosWrappers::BlockSparseMatrix &block_matrix)
+ {
+ typedef TrilinosWrappers::BlockSparseMatrix BlockMatrix;
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadBlockType;
+ typedef TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<PayloadBlockType> BlockPayload;
+ return dealii::block_operator<Range,Domain,BlockPayload,BlockMatrix>(block_matrix);
+ }
+
+
+ /**
+ * @relates BlockLinearOperator
+ *
+   * A variant of the above function that encapsulates a given collection
+   * @p ops of LinearOperators into a block structure.
+ *
+ * This function is the equivalent of the dealii::block_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <size_t m, size_t n,
+ typename Range,
+ typename Domain = Range>
+ inline BlockLinearOperator<Range, Domain, TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<TrilinosWrappers::internal::LinearOperator::TrilinosPayload> >
+ block_operator(const std::array<std::array<LinearOperator<typename Range::BlockType, typename Domain::BlockType, TrilinosWrappers::internal::LinearOperator::TrilinosPayload>, n>, m> &ops)
+ {
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadBlockType;
+ typedef TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<PayloadBlockType> BlockPayload;
+ return dealii::block_operator<m,n,Range,Domain,BlockPayload>(ops);
+ }
+
+
+ /**
+ * @relates BlockLinearOperator
+ *
+   * This function extracts the diagonal blocks of @p block_matrix and creates a
+ * BlockLinearOperator with the diagonal. Off-diagonal elements are
+ * initialized as null_operator (with correct reinit_range_vector and
+ * reinit_domain_vector methods).
+ *
+ * This function is the equivalent of the dealii::block_diagonal_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
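+   * A minimal sketch (assuming an assembled TrilinosWrappers::BlockSparseMatrix
+   * <tt>B</tt> and an illustrative block vector type):
+   * @code
+   *   typedef TrilinosWrappers::MPI::BlockVector BlockVectorType;
+   *   const auto lo_B_diag =
+   *     TrilinosWrappers::block_diagonal_operator<BlockVectorType>(B);
+   * @endcode
+   *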
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <typename Range,
+ typename Domain = Range>
+ inline BlockLinearOperator<Range, Domain, TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<TrilinosWrappers::internal::LinearOperator::TrilinosPayload> >
+ block_diagonal_operator(const TrilinosWrappers::BlockSparseMatrix &block_matrix)
+ {
+ typedef TrilinosWrappers::BlockSparseMatrix BlockMatrix;
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadBlockType;
+ typedef TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<PayloadBlockType> BlockPayload;
+ return dealii::block_diagonal_operator<Range, Domain, BlockPayload, BlockMatrix>(block_matrix);
+ }
+
+
+ /**
+ * @relates BlockLinearOperator
+ *
+   * A variant of the above function that builds up a block diagonal linear operator
+ * from an array @p ops of diagonal elements (off-diagonal blocks are assumed
+ * to be 0).
+ *
+ * This function is the equivalent of the dealii::block_diagonal_operator, but
+ * ensures full compatibility with Trilinos operations by preselecting the
+ * appropriate template parameters.
+ *
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <size_t m, typename Range, typename Domain = Range>
+ inline BlockLinearOperator<Range, Domain, TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<TrilinosWrappers::internal::LinearOperator::TrilinosPayload> >
+ block_diagonal_operator(const std::array<LinearOperator<typename Range::BlockType, typename Domain::BlockType, TrilinosWrappers::internal::LinearOperator::TrilinosPayload>, m> &ops)
+ {
+ typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadBlockType;
+ typedef TrilinosWrappers::internal::BlockLinearOperator::TrilinosBlockPayload<PayloadBlockType> BlockPayload;
+ return dealii::block_diagonal_operator<m,Range,Domain,BlockPayload>(ops);
+ }
+
+//@}
+
+} // namespace TrilinosWrappers
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_WITH_CXX11 && DEAL_II_WITH_TRILINOS
+#endif
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/vector_view.h>
+#ifdef DEAL_II_WITH_CXX11
+#include <deal.II/lac/vector_memory.h>
+#endif // DEAL_II_WITH_CXX11
+
# include <vector>
# include <cmath>
# include <memory>
# include <Epetra_Map.h>
# include <Epetra_CrsGraph.h>
# include <Epetra_MultiVector.h>
+# include <Epetra_Operator.h>
+# include <Epetra_Comm.h>
# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# include "mpi.h"
# else
-# include "Epetra_SerialComm.h"
+# include <Epetra_SerialComm.h>
# endif
DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
class DynamicSparsityPattern;
+
namespace TrilinosWrappers
{
// forward declarations
* i.e., multiplications, are done in transposed order. However, this does
* not reshape the matrix to transposed form directly, so care should be
* taken when using this flag.
+ *
+ * @note Calling this function any even number of times in succession will
+ * return the object to its original state.
*/
void transpose ();
};
+#ifdef DEAL_II_WITH_CXX11
+
+  // forward declarations
+ class PreconditionBase;
+
+ namespace internal
+ {
+ namespace
+ {
+ inline
+ void check_vector_map_equality(const Epetra_CrsMatrix &mtrx,
+ const Epetra_MultiVector &src,
+ const Epetra_MultiVector &dst,
+ const bool transpose)
+ {
+ if (transpose == false)
+ {
+ Assert (src.Map().SameAs(mtrx.DomainMap()) == true,
+ ExcMessage ("Column map of matrix does not fit with vector map!"));
+ Assert (dst.Map().SameAs(mtrx.RangeMap()) == true,
+ ExcMessage ("Row map of matrix does not fit with vector map!"));
+ }
+ else
+ {
+ Assert (src.Map().SameAs(mtrx.RangeMap()) == true,
+ ExcMessage ("Column map of matrix does not fit with vector map!"));
+ Assert (dst.Map().SameAs(mtrx.DomainMap()) == true,
+ ExcMessage ("Row map of matrix does not fit with vector map!"));
+ }
+ (void)mtrx; // removes -Wunused-variable in optimized mode
+ (void)src;
+ (void)dst;
+ }
+
+ inline
+ void check_vector_map_equality(const Epetra_Operator &op,
+ const Epetra_MultiVector &src,
+ const Epetra_MultiVector &dst,
+ const bool transpose)
+ {
+ if (transpose == false)
+ {
+ Assert (src.Map().SameAs(op.OperatorDomainMap()) == true,
+ ExcMessage ("Column map of operator does not fit with vector map!"));
+ Assert (dst.Map().SameAs(op.OperatorRangeMap()) == true,
+ ExcMessage ("Row map of operator does not fit with vector map!"));
+ }
+ else
+ {
+ Assert (src.Map().SameAs(op.OperatorRangeMap()) == true,
+ ExcMessage ("Column map of operator does not fit with vector map!"));
+ Assert (dst.Map().SameAs(op.OperatorDomainMap()) == true,
+ ExcMessage ("Row map of operator does not fit with vector map!"));
+ }
+ (void)op; // removes -Wunused-variable in optimized mode
+ (void)src;
+ (void)dst;
+ }
+ }
+
+ namespace LinearOperator
+ {
+
+ /**
+       * This is an extension class to LinearOperators for Trilinos sparse matrix
+       * and preconditioner types. It provides the interface for performing basic
+ * operations (<tt>vmult</tt> and <tt>Tvmult</tt>) on Trilinos vector types.
+ * It fulfills the requirements necessary for wrapping a Trilinos solver,
+ * which calls Epetra_Operator functions, as a LinearOperator.
+ *
+ * @note The TrilinosWrappers::SparseMatrix or
+ * TrilinosWrappers::PreconditionBase that this payload wraps is passed by
+ * reference to the <tt>vmult</tt> and <tt>Tvmult</tt> functions. This
+ * object is not thread-safe when the transpose flag is set on it or the
+       * Trilinos object to which it refers. See the documentation for the
+ * TrilinosWrappers::internal::LinearOperator::TrilinosPayload::SetUseTranspose()
+ * function for further details.
+ *
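+       * As a hedged usage sketch (all names below are illustrative): this
+       * payload is created implicitly by TrilinosWrappers::linear_operator,
+       * after which the resulting LinearOperator can be handed to a Trilinos
+       * solver through dealii::inverse_operator:
+       * @code
+       *   // given an assembled TrilinosWrappers::SparseMatrix A and a
+       *   // right-hand side vector b
+       *   typedef TrilinosWrappers::MPI::Vector VectorType;
+       *   const auto lo_A = TrilinosWrappers::linear_operator<VectorType>(A);
+       *
+       *   TrilinosWrappers::PreconditionJacobi preconditioner;
+       *   preconditioner.initialize(A);
+       *   SolverControl solver_control (100, 1.0e-10);
+       *   TrilinosWrappers::SolverCG solver (solver_control);
+       *
+       *   const auto lo_A_inv = inverse_operator(lo_A, solver, preconditioner);
+       *   const VectorType x = lo_A_inv * b; // solves A x = b via Trilinos CG
+       * @endcode
+       *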
+ * @author Jean-Paul Pelteret, 2016
+ *
+ * @ingroup TrilinosWrappers
+ */
+ class TrilinosPayload
+ : public Epetra_Operator
+ {
+ public:
+
+ /**
+ * Definition for the internally supported vector type.
+ */
+ typedef Epetra_MultiVector VectorType;
+
+ /**
+         * Definition for the vector type for the range space of the operator.
+ */
+ typedef VectorType Range;
+
+ /**
+         * Definition for the vector type for the domain space of the operator.
+ */
+ typedef VectorType Domain;
+
+ /**
+ * @name Constructors / destructor
+ */
+//@{
+
+ /**
+ * Default constructor
+ *
+ * @note By design, the resulting object is inoperable since there is
+ * insufficient information with which to construct the domain and
+ * range maps.
+ */
+ TrilinosPayload ();
+
+ /**
+ * Constructor for a sparse matrix based on an exemplary matrix
+ */
+ TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
+ const TrilinosWrappers::SparseMatrix &matrix);
+
+ /**
+ * Constructor for a preconditioner based on an exemplary matrix
+ */
+ TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
+ const TrilinosWrappers::PreconditionBase &preconditioner);
+
+ /**
+ * Constructor for a preconditioner based on an exemplary preconditioner
+ */
+ TrilinosPayload (const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
+ const TrilinosWrappers::PreconditionBase &preconditioner);
+
+ /**
+ * Default copy constructor
+ */
+ TrilinosPayload (const TrilinosPayload &payload);
+
+ /**
+ * Composite copy constructor
+ *
+ * This is required for PackagedOperations as it sets up the domain and
+ * range maps, and composite <tt>vmult</tt> and <tt>Tvmult</tt> operations
+         * based on the combined action of the two operators.
+ */
+ TrilinosPayload (const TrilinosPayload &first_op,
+ const TrilinosPayload &second_op);
+
+ /**
+ * Destructor
+ */
+ virtual ~TrilinosPayload() {}
+
+ /**
+ * Default copy assignment operator.
+ */
+ TrilinosPayload &
+ operator=(const TrilinosPayload &) = default;
+
+
+ /**
+ * Returns a payload configured for transpose operations
+ */
+ TrilinosPayload transpose_payload () const;
+
+
+ /**
+ * Returns a payload configured for inverse operations
+ *
+         * Invoking this factory function will configure two additional functions,
+ * namely <tt>inv_vmult</tt> and <tt>inv_Tvmult</tt>, both of which wrap
+ * inverse operations.
+ * The <tt>vmult</tt> and <tt>Tvmult</tt> operations retain the standard
+ * definitions inherited from @p op.
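+         *
+         * A hypothetical sketch of the intended use (all names are
+         * illustrative; this function is normally invoked for you by
+         * dealii::inverse_operator rather than called directly):
+         * @code
+         *   TrilinosWrappers::SolverCG           solver (solver_control);
+         *   TrilinosWrappers::PreconditionJacobi preconditioner;
+         *   preconditioner.initialize(A);
+         *   const TrilinosPayload inv_payload =
+         *     payload.inverse_payload(solver, preconditioner);
+         * @endcode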
+ */
+ template <typename Solver, typename Preconditioner>
+ TrilinosPayload inverse_payload (Solver &, const Preconditioner &) const;
+
+
+//@}
+
+ /**
+ * @name LinearOperator functionality
+ */
+//@{
+
+ /**
+ * Returns an IndexSet that defines the partitioning of the domain space
+ * of this matrix, i.e., the partitioning of the vectors this matrix has
+ * to be multiplied with / operate on.
+ */
+ IndexSet
+ locally_owned_domain_indices () const;
+
+ /**
+ * Returns an IndexSet that defines the partitioning of the range space
+ * of this matrix, i.e., the partitioning of the vectors that result
+ * from matrix-vector products.
+ */
+ IndexSet
+ locally_owned_range_indices () const;
+
+ /**
+ * Return the MPI communicator object in use with this Payload.
+ */
+ MPI_Comm
+ get_mpi_communicator () const;
+
+ /**
+ * Sets an internal flag so that all operations performed by the matrix,
+ * i.e., multiplications, are done in transposed order.
+ * @note This does not reshape the matrix to transposed form directly,
+ * so care should be taken when using this flag.
+ */
+ void
+ transpose ();
+
+ /**
+ * The standard matrix-vector operation to be performed by the payload
+ * when Apply is called.
+ *
+ * @note This is not called by a LinearOperator, but rather by Trilinos
+ * functions that expect this to mimic the action of the LinearOperator.
+ */
+ std::function<void(VectorType &, const VectorType &)> vmult;
+
+ /**
+ * The standard transpose matrix-vector operation to be performed by
+ * the payload when Apply is called.
+ *
+ * @note This is not called by a LinearOperator, but rather by Trilinos
+ * functions that expect this to mimic the action of the LinearOperator.
+ */
+ std::function<void(VectorType &, const VectorType &)> Tvmult;
+
+ /**
+ * The inverse matrix-vector operation to be performed by the payload
+ * when ApplyInverse is called.
+ *
+ * @note This is not called by a LinearOperator, but rather by Trilinos
+ * functions that expect this to mimic the action of the InverseOperator.
+ */
+ std::function<void(VectorType &, const VectorType &)> inv_vmult;
+
+ /**
+ * The inverse transpose matrix-vector operation to be performed by
+ * the payload when ApplyInverse is called.
+ *
+ * @note This is not called by a LinearOperator, but rather by Trilinos
+ * functions that expect this to mimic the action of the InverseOperator.
+ */
+ std::function<void(VectorType &, const VectorType &)> inv_Tvmult;
+
+//@}
+
+ /**
+ * @name Core Epetra_Operator functionality
+ */
+//@{
+
+ /**
+ * Return the status of the transpose flag for this operator
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual bool
+ UseTranspose () const override;
+
+ /**
+ * Sets an internal flag so that all operations performed by the matrix,
+ * i.e., multiplications, are done in transposed order.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ *
+ * @note This does not reshape the matrix to transposed form directly,
+ * so care should be taken when using this flag. When the flag is set to
+ * true (either here or directly on the underlying Trilinos object
+         * itself), this object is no longer thread-safe. In essence, it is not
+         * possible to ensure that the transposed state of the LinearOperator and
+ * the underlying Trilinos object remain synchronized throughout all
+ * operations that may occur on different threads simultaneously.
+ */
+ virtual int
+ SetUseTranspose (bool UseTranspose) override;
+
+ /**
+ * Apply the vmult operation on a vector @p X (of internally defined
+ * type VectorType) and store the result in the vector @p Y.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ *
+ * @note The intended operation depends on the status of the internal
+ * transpose flag. If this flag is set to true, the result will be
+ * the equivalent of performing a Tvmult operation.
+ */
+ virtual int
+ Apply(const VectorType &X,
+ VectorType &Y) const override;
+
+ /**
+ * Apply the vmult inverse operation on a vector @p X (of internally
+ * defined type VectorType) and store the result in the vector @p Y.
+ *
+         * In practice, this function is only called from a Trilinos solver if
+ * the wrapped object is to act as a preconditioner.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ *
+ * @note This function will only be operable if the payload has been
+         * initialised with an InverseOperator, or is a wrapper to a preconditioner.
+ * If not, then using this function will lead to an error being thrown.
+ * @note The intended operation depends on the status of the internal
+ * transpose flag. If this flag is set to true, the result will be
+ * the equivalent of performing a Tvmult operation.
+ */
+ virtual int
+ ApplyInverse(const VectorType &Y,
+ VectorType &X) const override;
+//@}
+
+ /**
+ * @name Additional Epetra_Operator functionality
+ */
+//@{
+
+ /**
+ * Returns a label to describe this class.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual const char *
+ Label () const override;
+
+ /**
+ * Returns a reference to the underlying MPI communicator for
+ * this object.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual const Epetra_Comm &
+ Comm () const override;
+
+ /**
+ * Return the partitioning of the domain space of this matrix, i.e., the
+ * partitioning of the vectors this matrix has to be multiplied with.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual const Epetra_Map &
+ OperatorDomainMap () const override;
+
+ /**
+ * Return the partitioning of the range space of this matrix, i.e., the
+         * partitioning of the vectors that result from matrix-vector
+ * products.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual const Epetra_Map &
+ OperatorRangeMap () const override;
+//@}
+
+ private:
+
+ /**
+ * A flag recording whether the operator is to perform standard
+ * matrix-vector multiplication, or the transpose operation.
+ */
+ bool use_transpose;
+
+ /**
+         * The MPI communicator over which the wrapped Trilinos object is
+         * distributed.
+ */
+#ifdef DEAL_II_WITH_MPI
+ Epetra_MpiComm communicator;
+#else
+ Epetra_SerialComm communicator;
+#endif
+
+ /**
+ * Epetra_Map that sets the partitioning of the domain space of
+ * this operator.
+ */
+ Epetra_Map domain_map;
+
+ /**
+ * Epetra_Map that sets the partitioning of the range space of
+ * this operator.
+ */
+ Epetra_Map range_map;
+
+ /**
+         * Returns a flag that describes whether this operator can compute the
+         * infinity norm. Since in general this is not the case, this always
+         * returns a negative result.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual bool
+ HasNormInf () const override;
+
+ /**
+ * Returns the infinity norm of this operator.
+ * Throws an error since, in general, we cannot compute this value.
+ *
+ * This overloads the same function from the Trilinos class
+ * Epetra_Operator.
+ */
+ virtual double
+ NormInf () const override;
+ };
+
+ /**
+       * Returns a payload configured to support the addition of two
+       * LinearOperators.
+ */
+ TrilinosPayload operator+(const TrilinosPayload &first_op,
+ const TrilinosPayload &second_op);
+
+ /**
+       * Returns a payload configured to support the multiplication of two
+       * LinearOperators.
+ */
+ TrilinosPayload operator*(const TrilinosPayload &first_op,
+ const TrilinosPayload &second_op);
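+
+      // A hedged usage note: these payload operators are not normally called
+      // directly. They are invoked internally when LinearOperators carrying a
+      // TrilinosPayload are combined, e.g. (assuming lo_A and lo_B were built
+      // with TrilinosWrappers::linear_operator):
+      //
+      //   const auto op_sum     = lo_A + lo_B; // uses operator+ on the payloads
+      //   const auto op_product = lo_A * lo_B; // uses operator* on the payloads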
+
+ } /* namespace LinearOperator */
+ } /* namespace internal */
+
+#endif // DEAL_II_WITH_CXX11
-// -------------------------- inline and template functions ----------------------
+// -------------------------- inline and template functions ----------------------
#ifndef DOXYGEN
}
+#ifdef DEAL_II_WITH_CXX11
+ namespace internal
+ {
+ namespace LinearOperator
+ {
+ template <typename Solver, typename Preconditioner>
+ TrilinosPayload
+ TrilinosPayload::inverse_payload (
+ Solver &solver,
+ const Preconditioner &preconditioner) const
+ {
+ const auto &payload = *this;
+
+ TrilinosPayload return_op(payload);
+
+        // Capture the payload by copy so that it remains valid; the solver
+        // and preconditioner are captured by reference.
+
+ return_op.inv_vmult = [payload,&solver,&preconditioner](
+ TrilinosPayload::Domain &tril_dst, const TrilinosPayload::Range &tril_src
+ )
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(payload,
+ tril_src, tril_dst,
+ !payload.UseTranspose());
+ solver.solve(payload, tril_dst, tril_src, preconditioner);
+ };
+
+ return_op.inv_Tvmult = [payload,&solver,&preconditioner](
+ TrilinosPayload::Range &tril_dst, const TrilinosPayload::Domain &tril_src
+ )
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(payload,
+ tril_src, tril_dst,
+ payload.UseTranspose());
+
+ const_cast<TrilinosPayload &>(payload).transpose();
+ solver.solve(payload, tril_dst, tril_src, preconditioner);
+ const_cast<TrilinosPayload &>(payload).transpose();
+ };
+
+ // If the input operator is already setup for transpose operations, then
+ // we must do similar with its inverse.
+ if (return_op.UseTranspose() == true)
+ std::swap(return_op.inv_vmult,
+ return_op.inv_Tvmult);
+
+ return return_op;
+ }
+ } // namespace LinearOperator
+ } // namespace internal
+#endif // DEAL_II_WITH_CXX11
#endif // DOXYGEN
-}
+} /* namespace TrilinosWrappers */
DEAL_II_NAMESPACE_CLOSE
# include <deal.II/lac/sparsity_tools.h>
# include <deal.II/lac/la_parallel_vector.h>
+#ifdef DEAL_II_WITH_CXX11
+# include <deal.II/lac/trilinos_precondition.h>
+#endif
+
DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
# include <Epetra_Export.h>
# include <ml_epetra_utils.h>
}
}
+#ifdef DEAL_II_WITH_CXX11
+
+namespace TrilinosWrappers
+{
+ namespace internal
+ {
+ namespace LinearOperator
+ {
+
+ TrilinosPayload::TrilinosPayload ()
+ : use_transpose (false),
+#ifdef DEAL_II_WITH_MPI
+ communicator (MPI_COMM_SELF),
+#endif
+ domain_map (IndexSet().make_trilinos_map(communicator.Comm())),
+ range_map (IndexSet().make_trilinos_map(communicator.Comm()))
+ {
+ vmult = [](Range &, const Domain &)
+ {
+ Assert(false,
+                   ExcMessage("Uninitialized TrilinosPayload::vmult called "
+ "(Default constructor)"));
+ };
+
+ Tvmult = [](Domain &, const Range &)
+ {
+ Assert(false,
+                   ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
+ "(Default constructor)"));
+ };
+
+ inv_vmult = [](Domain &, const Range &)
+ {
+ Assert(false,
+                   ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
+ "(Default constructor)"));
+ };
+
+ inv_Tvmult = [](Range &, const Domain &)
+ {
+ Assert(false,
+                   ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
+ "(Default constructor)"));
+ };
+ }
+
+
+
+ TrilinosPayload::TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
+ const TrilinosWrappers::SparseMatrix &matrix)
+ : use_transpose (matrix_exemplar.trilinos_matrix().UseTranspose()),
+ communicator (matrix_exemplar.get_mpi_communicator()),
+ domain_map (matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
+ range_map (matrix_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
+ {
+ vmult = [&matrix_exemplar,&matrix](Range &tril_dst, const Domain &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ Assert (matrix.trilinos_matrix().Filled(), TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
+ internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
+ tril_src, tril_dst,
+ matrix_exemplar.trilinos_matrix().UseTranspose());
+ internal::check_vector_map_equality(matrix.trilinos_matrix(),
+ tril_src, tril_dst,
+ matrix.trilinos_matrix().UseTranspose());
+
+ const int ierr = matrix.trilinos_matrix().Apply (tril_src, tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ };
+
+ Tvmult = [&matrix_exemplar,&matrix](Domain &tril_dst, const Range &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ Assert (matrix.trilinos_matrix().Filled(), TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
+ internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
+ tril_src, tril_dst,
+ !matrix_exemplar.trilinos_matrix().UseTranspose());
+ internal::check_vector_map_equality(matrix.trilinos_matrix(),
+ tril_src, tril_dst,
+ !matrix.trilinos_matrix().UseTranspose());
+
+ Epetra_CrsMatrix &tril_mtrx_non_const = const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
+ tril_mtrx_non_const.SetUseTranspose(!matrix.trilinos_matrix().UseTranspose());
+ const int ierr = matrix.trilinos_matrix().Apply (tril_src, tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ tril_mtrx_non_const.SetUseTranspose(!matrix.trilinos_matrix().UseTranspose());
+ };
+
+ inv_vmult = [](Domain &, const Range &)
+ {
+ Assert(false,
+                   ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
+ "(Matrix constructor with matrix exemplar)"));
+ };
+
+ inv_Tvmult = [](Range &, const Domain &)
+ {
+ Assert(false,
+                   ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
+ "(Matrix constructor with matrix exemplar)"));
+ };
+ }
+
+
+
+ TrilinosPayload::TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
+ const TrilinosWrappers::PreconditionBase &preconditioner)
+ : use_transpose (matrix_exemplar.trilinos_matrix().UseTranspose()),
+ communicator (matrix_exemplar.get_mpi_communicator()),
+ domain_map (matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
+ range_map (matrix_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
+ {
+ vmult = [&matrix_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
+ tril_src, tril_dst,
+ matrix_exemplar.trilinos_matrix().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ preconditioner.trilinos_operator().UseTranspose());
+
+ const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ };
+
+ Tvmult = [&matrix_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
+ tril_src, tril_dst,
+ !matrix_exemplar.trilinos_matrix().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ !preconditioner.trilinos_operator().UseTranspose());
+
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ };
+
+ inv_vmult = [&matrix_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
+ tril_src, tril_dst,
+ !matrix_exemplar.trilinos_matrix().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ !preconditioner.trilinos_operator().UseTranspose());
+
+ const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ };
+
+ inv_Tvmult = [&matrix_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
+ tril_src, tril_dst,
+ matrix_exemplar.trilinos_matrix().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ preconditioner.trilinos_operator().UseTranspose());
+
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ };
+ }
+
+
+
+ TrilinosPayload::TrilinosPayload (const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
+ const TrilinosWrappers::PreconditionBase &preconditioner)
+ : use_transpose (preconditioner_exemplar.trilinos_operator().UseTranspose()),
+ communicator (preconditioner_exemplar.get_mpi_communicator()),
+ domain_map (preconditioner_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
+ range_map (preconditioner_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
+ {
+ vmult = [&preconditioner_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
+ tril_src, tril_dst,
+ preconditioner_exemplar.trilinos_operator().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ preconditioner.trilinos_operator().UseTranspose());
+
+ const int ierr = preconditioner.trilinos_operator().Apply (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ };
+
+ Tvmult = [&preconditioner_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
+ tril_src, tril_dst,
+ !preconditioner_exemplar.trilinos_operator().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ !preconditioner.trilinos_operator().UseTranspose());
+
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ const int ierr = preconditioner.trilinos_operator().Apply (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ };
+
+ inv_vmult = [&preconditioner_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
+ tril_src, tril_dst,
+ !preconditioner_exemplar.trilinos_operator().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ !preconditioner.trilinos_operator().UseTranspose());
+
+ const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ };
+
+ inv_Tvmult = [&preconditioner_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
+ {
+ // Duplicated from TrilinosWrappers::PreconditionBase::vmult
+ // as well as from TrilinosWrappers::SparseMatrix::Tvmult
+ Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
+ internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
+ tril_src, tril_dst,
+ preconditioner_exemplar.trilinos_operator().UseTranspose());
+ internal::check_vector_map_equality(preconditioner.trilinos_operator(),
+ tril_src, tril_dst,
+ preconditioner.trilinos_operator().UseTranspose());
+
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
+ };
+ }
+
+
+
+ TrilinosPayload::TrilinosPayload (const TrilinosPayload &payload)
+ : vmult (payload.vmult),
+ Tvmult (payload.Tvmult),
+ inv_vmult (payload.inv_vmult),
+ inv_Tvmult (payload.inv_Tvmult),
+ use_transpose (payload.use_transpose),
+ communicator (payload.communicator),
+ domain_map (payload.domain_map),
+ range_map (payload.range_map)
+ { }
+
+
+
+ // Composite copy constructor
+ // This is required for PackagedOperations
+ TrilinosPayload::TrilinosPayload (const TrilinosPayload &first_op,
+ const TrilinosPayload &second_op)
+ : use_transpose (false), // The combination of operators provides the exact definition of the operation
+ communicator (first_op.communicator),
+ domain_map (second_op.domain_map),
+ range_map (first_op.range_map)
+ { }
+
+
+
+ TrilinosPayload
+ TrilinosPayload::transpose_payload () const
+ {
+ TrilinosPayload return_op (*this);
+ return_op.transpose();
+ return return_op;
+ }
+
+
+
+ IndexSet
+ TrilinosPayload::locally_owned_domain_indices () const
+ {
+ return IndexSet(domain_map);
+ }
+
+
+
+ IndexSet
+ TrilinosPayload::locally_owned_range_indices () const
+ {
+ return IndexSet(range_map);
+ }
+
+
+
+ MPI_Comm
+ TrilinosPayload::get_mpi_communicator () const
+ {
+ return communicator.Comm();
+ }
+
+
+
+ void
+ TrilinosPayload::transpose ()
+ {
+ SetUseTranspose(!use_transpose);
+ }
+
+
+
+ bool
+ TrilinosPayload::UseTranspose () const
+ {
+ return use_transpose;
+ }
+
+
+
+ int
+ TrilinosPayload::SetUseTranspose (bool UseTranspose)
+ {
+ if (use_transpose != UseTranspose)
+ {
+ use_transpose = UseTranspose;
+ std::swap(domain_map,range_map);
+ std::swap(vmult,Tvmult);
+ std::swap(inv_vmult,inv_Tvmult);
+ }
+ return 0;
+ }
+
+
+
+ int
+ TrilinosPayload::Apply(const VectorType &X,
+ VectorType &Y) const
+ {
+        // The transposed-ness of the operation is already accounted for:
+        // SetUseTranspose() swaps the vmult and Tvmult function objects, so
+        // calling vmult here always performs the intended operation.
+ vmult(Y,X);
+ return 0;
+ }
+
+
+
+ int
+ TrilinosPayload::ApplyInverse(const VectorType &X,
+ VectorType &Y) const
+ {
+        // The transposed-ness of the operation is already accounted for:
+        // SetUseTranspose() swaps the inv_vmult and inv_Tvmult function
+        // objects, so calling inv_vmult here always performs the intended
+        // operation.
+ inv_vmult(Y,X);
+ return 0;
+ }
+
+
+
+ const char *
+ TrilinosPayload::Label () const
+ {
+ return "TrilinosPayload";
+ }
+
+
+
+ const Epetra_Comm &
+ TrilinosPayload::Comm () const
+ {
+ return communicator;
+ }
+
+
+
+ const Epetra_Map &
+ TrilinosPayload::OperatorDomainMap () const
+ {
+ return domain_map;
+ }
+
+
+
+ const Epetra_Map &
+ TrilinosPayload::OperatorRangeMap () const
+ {
+ return range_map;
+ }
+
+
+
+ bool
+ TrilinosPayload::HasNormInf () const
+ {
+ return false;
+ }
+
+
+
+ double
+ TrilinosPayload::NormInf () const
+ {
+ AssertThrow(false, ExcNotImplemented());
+ return 0.0;
+ }
+
+
+
+ TrilinosPayload operator+(const TrilinosPayload &first_op,
+ const TrilinosPayload &second_op)
+ {
+ typedef typename TrilinosPayload::Domain Domain;
+ typedef typename TrilinosPayload::Range Range;
+ typedef typename TrilinosPayload::VectorType Intermediate;
+ typedef TrilinosWrappers::MPI::Vector GVMVectorType;
+
+ Assert(first_op.locally_owned_domain_indices() == second_op.locally_owned_domain_indices(),
+ ExcMessage("Operators are set to work on incompatible IndexSets."));
+ Assert(first_op.locally_owned_range_indices() == second_op.locally_owned_range_indices(),
+ ExcMessage("Operators are set to work on incompatible IndexSets."));
+
+ TrilinosPayload return_op (first_op, second_op);
+
+ // Capture by copy so the payloads are always valid
+ return_op.vmult = [first_op, second_op](Range &tril_dst,
+ const Domain &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ second_op.Apply(tril_src, tril_int);
+ first_op.Apply(tril_src, tril_dst);
+ const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+
+ vector_memory.free(i);
+ };
+
+ return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
+ const Range &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ // We must first transpose the operators to get the right IndexSets
+ // for the input, intermediate and result vectors
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ second_op.Apply(tril_src, tril_int);
+ first_op.Apply(tril_src, tril_dst);
+ const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+
+ // Reset transpose flag
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ vector_memory.free(i);
+ };
+
+ return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
+ const Range &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ second_op.ApplyInverse(tril_src, tril_int);
+ first_op.ApplyInverse(tril_src, tril_dst);
+ const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+
+ vector_memory.free(i);
+ };
+
+ return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
+ const Domain &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ // We must first transpose the operators to get the right IndexSets
+ // for the input, intermediate and result vectors
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ second_op.ApplyInverse(tril_src, tril_int);
+ first_op.ApplyInverse(tril_src, tril_dst);
+ const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+
+ // Reset transpose flag
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ vector_memory.free(i);
+ };
+
+ return return_op;
+ }
+
+
+
+ TrilinosPayload operator*(const TrilinosPayload &first_op,
+ const TrilinosPayload &second_op)
+ {
+ typedef typename TrilinosPayload::Domain Domain;
+ typedef typename TrilinosPayload::Range Range;
+ typedef typename TrilinosPayload::VectorType Intermediate;
+ typedef TrilinosWrappers::MPI::Vector GVMVectorType;
+
+ AssertThrow(first_op.locally_owned_domain_indices() == second_op.locally_owned_range_indices(),
+ ExcMessage("Operators are set to work on incompatible IndexSets."));
+
+ TrilinosPayload return_op (first_op, second_op);
+
+ // Capture by copy so the payloads are always valid
+ return_op.vmult = [first_op, second_op](Range &tril_dst,
+ const Domain &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ second_op.Apply(tril_src, tril_int);
+ first_op.Apply(tril_int, tril_dst);
+
+ vector_memory.free(i);
+ };
+
+ return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
+ const Range &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ // We must first transpose the operators to get the right IndexSets
+ // for the input, intermediate and result vectors
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // Apply the operators in the reverse order to vmult
+ first_op.Apply(tril_src, tril_int);
+ second_op.Apply(tril_int, tril_dst);
+
+ // Reset transpose flag
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ vector_memory.free(i);
+ };
+
+ return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
+ const Range &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // Apply the operators in the reverse order to vmult
+ // and the same order as Tvmult
+ first_op.ApplyInverse(tril_src, tril_int);
+ second_op.ApplyInverse(tril_int, tril_dst);
+
+ vector_memory.free(i);
+ };
+
+ return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
+ const Domain &tril_src)
+ {
+ // Duplicated from LinearOperator::operator*
+ // TODO: Template the constructor on GrowingVectorMemory vector type?
+ static GrowingVectorMemory<GVMVectorType> vector_memory;
+ GVMVectorType *i = vector_memory.alloc();
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ // We must first transpose the operators to get the right IndexSets
+ // for the input, intermediate and result vectors
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ // Initialise intermediate vector
+ const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
+ i->reinit(IndexSet(first_op_init_map),
+ first_op.get_mpi_communicator(),
+ /*bool omit_zeroing_entries =*/true);
+
+ // Duplicated from TrilinosWrappers::SparseMatrix::vmult
+ const size_type i_local_size = i->end() - i->begin();
+ AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
+ const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
+ AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
+ (void)second_op_init_map;
+ Intermediate tril_int (View, first_op_init_map,
+ const_cast<TrilinosScalar *>(i->begin()),
+ i_local_size, 1);
+
+ // These operators may themselves be transposed or not, so we let them
+ // decide what the intended outcome is
+ // Apply the operators in the reverse order to Tvmult
+ // and the same order as vmult
+ second_op.ApplyInverse(tril_src, tril_int);
+ first_op.ApplyInverse(tril_int, tril_dst);
+
+ // Reset transpose flag
+ const_cast<TrilinosPayload &>(first_op).transpose();
+ const_cast<TrilinosPayload &>(second_op).transpose();
+
+ vector_memory.free(i);
+ };
+
+ return return_op;
+ }
+
+ } /* namespace LinearOperator */
+ } /* namespace internal */
+} /* namespace TrilinosWrappers */
+
+#endif // DEAL_II_WITH_CXX11
// explicit instantiations
template void
SparseMatrix::Tvmult_add (dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
+
}
DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test the various Trilinos preconditioner and solver options when used
+// through LinearOperator and inverse_operator
+
+#include "../tests.h"
+
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_block_vector.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+
+
+using namespace dealii;
+
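+// Print all entries of a vector to the deallog stream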
+template<typename VECTOR>
+void
+print (const VECTOR &vec)
+{
+ for (types::global_dof_index i=0; i < vec.size(); ++i)
+ {
+ deallog << vec(i) << " ";
+ }
+ deallog << std::endl;
+}
+
+
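+// Check that a LinearOperator wrapping the matrix A can be combined with the
+// given preconditioner: first via inverse_operator (an exact inverse up to
+// the solver tolerance), and then as an approximate inverse built directly
+// from the preconditioner, both with an exemplar matrix and stand-alone.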
+template<class PRECONDITIONER, class MATRIX, class VECTOR,
+ class ADDITIONAL_DATA = typename PRECONDITIONER::AdditionalData>
+void
+test_preconditioner (const MATRIX &A,
+ const VECTOR &b,
+ const ADDITIONAL_DATA &data = ADDITIONAL_DATA())
+{
+ const auto lo_A = linear_operator<VECTOR>(A);
+ // Note: The above should be equivalent to the following:
+ //
+ // typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PAYLOAD;
+ // const auto lo_A = linear_operator<VECTOR,VECTOR,PAYLOAD>(A);
+
+ PRECONDITIONER preconditioner;
+ preconditioner.initialize(A, data);
+
+ typedef TrilinosWrappers::SolverCG SOLVER;
+ SolverControl solver_control (100, 1.0e-10);
+ SOLVER solver (solver_control);
+
+ // Exact inverse
+ const auto lo_A_inv = inverse_operator(lo_A, solver, preconditioner);
+ // Note: The above should be equivalent to the following:
+ //
+ // const auto lo_A_inv = inverse_operator<PAYLOAD,SOLVER,
+ // PRECONDITIONER,
+ // VECTOR,VECTOR>(lo_A,
+ // solver,
+ // preconditioner);
+
+ // Singular operation
+ {
+ deallog.push("S_Op");
+ const VECTOR x = lo_A_inv*b;
+ print(x);
+ deallog.pop();
+ }
+
+ // Composite operation
+ {
+ deallog.push("C_Op");
+ const VECTOR x = (lo_A_inv*lo_A*lo_A_inv)*b;
+ print(x);
+ deallog.pop();
+ }
+
+ // Approximate inverse
+ deallog.push("Approx");
+ {
+ // Using exemplar matrix
+ deallog.push("Exemp");
+ const auto lo_A_inv_approx = linear_operator<VECTOR,VECTOR>(A, preconditioner);
+ // Note: The above should be equivalent to the following:
+ //
+ // typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PAYLOAD;
+ // const auto lo_A_inv_approx = linear_operator<VECTOR,VECTOR,PAYLOAD>(A, preconditioner);
+
+ // Singular operation
+ {
+ deallog.push("S_Op");
+ const VECTOR x_approx = lo_A_inv_approx*b;
+ print(x_approx);
+ deallog.pop();
+ }
+
+ // Composite operation
+ {
+ deallog.push("C_Op");
+ const VECTOR x_approx = (lo_A_inv_approx*lo_A*lo_A_inv_approx)*b;
+ print(x_approx);
+ deallog.pop();
+ }
+
+ deallog.pop();
+ }
+ {
+ // Stand-alone
+ deallog.push("S.A.");
+ typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PAYLOAD;
+ const auto lo_A_inv_approx = linear_operator<VECTOR,VECTOR,PAYLOAD>(preconditioner);
+
+ // Singular operation
+ {
+ deallog.push("S_Op");
+ const VECTOR x_approx = lo_A_inv_approx*b;
+ print(x_approx);
+ deallog.pop();
+ }
+
+ // Composite operation
+ {
+ deallog.push("C_Op");
+ const VECTOR x_approx = (lo_A_inv_approx*lo_A*lo_A_inv_approx)*b;
+ print(x_approx);
+ deallog.pop();
+ }
+
+ deallog.pop();
+ }
+ deallog.pop();
+}
+
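+// As test_preconditioner above, but varying the Trilinos solver type while
+// keeping a fixed Jacobi preconditioner; the last check composes two
+// independently constructed inverse operators.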
+template<class SOLVER, class MATRIX, class VECTOR>
+void
+test_solver (const MATRIX &A,
+ const VECTOR &b)
+{
+ const auto lo_A = linear_operator<VECTOR>(A);
+ // Note: The above should be equivalent to the following:
+ //
+ // typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PAYLOAD;
+ // const auto lo_A = linear_operator<VECTOR,VECTOR,PAYLOAD>(A);
+
+ SolverControl solver_control (100, 1.0e-10);
+ SOLVER solver (solver_control);
+
+ typedef TrilinosWrappers::PreconditionJacobi PRECONDITIONER;
+ PRECONDITIONER preconditioner;
+ preconditioner.initialize(A);
+
+ {
+ const auto lo_A_inv = inverse_operator(lo_A, solver, preconditioner);
+ // Note: The above should be equivalent to the following:
+ //
+ // const auto lo_A_inv = inverse_operator<PAYLOAD,SOLVER,
+ // PRECONDITIONER,
+ // VECTOR,VECTOR>(lo_A,
+ // solver,
+ // preconditioner);
+
+ // Singular operation
+ {
+ deallog.push("S_Op");
+ const VECTOR x_approx = lo_A_inv*b;
+ print(x_approx);
+ deallog.pop();
+ }
+
+ // Composite operation
+ {
+ deallog.push("C_Op");
+ const VECTOR x_approx = (lo_A_inv*lo_A*lo_A_inv)*b;
+ print(x_approx);
+ deallog.pop();
+ }
+ }
+
+ // Composite operation 2
+ {
+ deallog.push("C_Op2");
+ SolverControl solver_control_1 (100, 1.0e-10);
+ SOLVER solver_1 (solver_control_1);
+ const auto lo_A_inv_1 = inverse_operator(lo_A, solver_1, preconditioner);
+ SolverControl solver_control_2 (100, 1.0e-10);
+ SOLVER solver_2 (solver_control_2);
+ const auto lo_A_inv_2 = inverse_operator(lo_A, solver_2, preconditioner);
+ const VECTOR x_approx = (lo_A_inv_2*lo_A*lo_A_inv_1)*b;
+ print(x_approx);
+ deallog.pop();
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.depth_console(0);
+ deallog << std::setprecision(10);
+
+ // TrilinosWrappers::SparseMatrix
+ {
+ const unsigned int rc=10;
+ TrilinosWrappers::SparsityPattern sparsity_pattern (rc, rc, /*n_entries_per_row =*/ 1);
+ for (unsigned int i=0; i < rc; ++i)
+ {
+ sparsity_pattern.add(i,i);
+ }
+ sparsity_pattern.compress();
+
+ TrilinosWrappers::SparseMatrix A (sparsity_pattern);
+ TrilinosWrappers::Vector b (A.domain_partitioner());
+ TrilinosWrappers::MPI::Vector c (A.domain_partitioner());
+ for (unsigned int i=0; i < rc; ++i)
+ {
+ A.set(i,i,2.0);
+ b(i) = i;
+ }
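+    // A is twice the identity and b_i = i, so the solution of A x = b has
+    // entries x_i = i/2; this is the reference vector the checks below
+    // should reproduce.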
+
+ // === PRECONDITIONERS ===
+ deallog << "PRECONDITIONERS" << std::endl;
+ deallog.push("Preconditioners");
+
+ {
+ deallog.push("PreconditionAMG");
+ typedef TrilinosWrappers::PreconditionAMG PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionAMGMueLu");
+ typedef TrilinosWrappers::PreconditionAMGMueLu PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionChebyshev");
+ typedef TrilinosWrappers::PreconditionChebyshev PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionIC");
+ typedef TrilinosWrappers::PreconditionIC PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionIdentity");
+ typedef TrilinosWrappers::PreconditionIdentity PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionILU");
+ typedef TrilinosWrappers::PreconditionILU PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionILUT");
+ typedef TrilinosWrappers::PreconditionILUT PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionJacobi");
+ typedef TrilinosWrappers::PreconditionJacobi PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionSOR");
+ typedef TrilinosWrappers::PreconditionSOR PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("PreconditionSSOR");
+ typedef TrilinosWrappers::PreconditionSSOR PREC;
+ test_preconditioner<PREC>(A, b);
+ test_preconditioner<PREC>(A, c);
+ deallog.pop();
+ }
+
+ deallog.pop();
+
+ // === SOLVERS ===
+ deallog << std::endl;
+ deallog << "SOLVERS" << std::endl;
+ deallog.push("Solvers");
+
+ {
+ try
+ {
+ // TODO: AztecOO::Iterate error code -3: loss of precision
+ // Note: Even though this test fails, we would still like to check
+ // that we can build LinearOperators with this solver
+ deallog.push("SolverBicgstab");
+ typedef TrilinosWrappers::SolverBicgstab SLVR;
+ test_solver<SLVR> (A, b);
+ deallog.pop();
+ }
+ catch (...)
+ {
+ deallog.pop();
+ deallog << "Known AztecOO error in SolverBicgstab" << std::endl;
+ deallog.pop();
+ }
+ }
+
+ {
+ deallog.push("SolverCG");
+ typedef TrilinosWrappers::SolverCG SLVR;
+ test_solver<SLVR> (A, b);
+ deallog.pop();
+ }
+
+ {
+ deallog.push("SolverCGS");
+ typedef TrilinosWrappers::SolverCGS SLVR;
+ test_solver<SLVR> (A, b);
+ deallog.pop();
+ }
+
+ // The TrilinosWrappers::SolverDirect class is not (yet) compatible with
+ // the LinearOperator class.
+ // {
+ // deallog.push("SolverDirect");
+ // typedef TrilinosWrappers::SolverDirect SLVR;
+ // // test_solver<SLVR> (A, b);
+ //
+ // typedef dealii::TrilinosWrappers::Vector VectorType;
+ // typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadType;
+ // // TODO: Full template expansion required for composite operator. Can one prevent this?
+ // // i.e. is 'const auto lo_A = linear_operator<VectorType>(A);' possible?
+ // const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+ //
+ // SolverControl solver_control (100, 1.0e-10);
+ // SLVR solver (solver_control);
+ // solver.initialize(A);
+ //
+ // // TODO: Full template expansion required for composite operator. Can one prevent this?
+ // // i.e. is 'const auto lo_A_inv = linear_operator<VectorType>(solver);' possible?
+ // const auto lo_A_inv = linear_operator<VectorType,VectorType,PayloadType>(solver);
+ //
+ // // Singular operation
+ // {
+ // deallog.push("S_Op");
+ // const TrilinosWrappers::Vector x_approx = lo_A_inv*b;
+ // print(x_approx);
+ // deallog.pop();
+ // }
+ //
+ // // Composite operation
+ // {
+ // deallog.push("C_Op");
+ // const TrilinosWrappers::Vector x_approx = (lo_A_inv*lo_A*lo_A_inv)*b;
+ // print(x_approx);
+ // deallog.pop();
+ // }
+ //
+ // deallog.pop();
+ // }
+
+ {
+ deallog.push("SolverGMRES");
+ typedef TrilinosWrappers::SolverGMRES SLVR;
+ test_solver<SLVR> (A, b);
+ deallog.pop();
+ }
+
+ {
+ try
+ {
+ // TODO: AztecOO::Iterate error code -2: numerical breakdown
+ // Note: Even though this test fails, we would still like to check
+ // that we can build LinearOperators with this solver
+ deallog.push("SolverTFQMR");
+ typedef TrilinosWrappers::SolverTFQMR SLVR;
+ test_solver<SLVR> (A, b);
+ deallog.pop();
+ }
+ catch (...)
+ {
+ deallog.pop();
+ deallog << "Known AztecOO error in SolverTFQMR" << std::endl;
+ deallog.pop();
+ }
+ }
+
+ deallog.pop();
+ deallog << "TrilinosWrappers::SparseMatrix OK" << std::endl;
+ } // TrilinosWrappers::SparseMatrix
+
+ // {
+ // // TODO: Implement these checks
+ // TrilinosWrappers::PreconditionBlockJacobi
+ // TrilinosWrappers::PreconditionBlockSOR
+ // TrilinosWrappers::PreconditionBlockSSOR
+ // TrilinosWrappers::PreconditionBlockwiseDirect
+ // }
+}
--- /dev/null
+
+DEAL:0::PRECONDITIONERS
+DEAL:0:Preconditioners:PreconditionAMG:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMG:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMG:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionAMGMueLu:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:S_Op::Convergence step 1 value 4.910148818e-15
+DEAL:0:Preconditioners:PreconditionChebyshev:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::Convergence step 1 value 4.910148818e-15
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::Convergence step 1 value 1.986027323e-15
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:Exemp:S_Op::0.000000000 0.08823529412 0.1764705882 0.2647058824 0.3529411765 0.4411764706 0.5294117647 0.6176470588 0.7058823529 0.7941176471
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:Exemp:C_Op::0.000000000 0.01557093426 0.03114186851 0.04671280277 0.06228373702 0.07785467128 0.09342560554 0.1089965398 0.1245674740 0.1401384083
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:S.A.:S_Op::0.000000000 0.08823529412 0.1764705882 0.2647058824 0.3529411765 0.4411764706 0.5294117647 0.6176470588 0.7058823529 0.7941176471
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:S.A.:C_Op::0.000000000 0.01557093426 0.03114186851 0.04671280277 0.06228373702 0.07785467128 0.09342560554 0.1089965398 0.1245674740 0.1401384083
+DEAL:0:Preconditioners:PreconditionChebyshev:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionChebyshev:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIC:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIC:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:Exemp:S_Op::0.000000000 1.000000000 2.000000000 3.000000000 4.000000000 5.000000000 6.000000000 7.000000000 8.000000000 9.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:Exemp:C_Op::0.000000000 2.000000000 4.000000000 6.000000000 8.000000000 10.00000000 12.00000000 14.00000000 16.00000000 18.00000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:S.A.:S_Op::0.000000000 1.000000000 2.000000000 3.000000000 4.000000000 5.000000000 6.000000000 7.000000000 8.000000000 9.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:S.A.:C_Op::0.000000000 2.000000000 4.000000000 6.000000000 8.000000000 10.00000000 12.00000000 14.00000000 16.00000000 18.00000000
+DEAL:0:Preconditioners:PreconditionIdentity:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionIdentity:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILU:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILU:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionILUT:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionILUT:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionJacobi:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionJacobi:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSOR:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSOR:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:Exemp:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:Exemp:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:S.A.:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:S.A.:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Preconditioners:PreconditionSSOR:S_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:S_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::Starting value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::Convergence step 0 value 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:Exemp:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:Exemp:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:S.A.:S_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0:Preconditioners:PreconditionSSOR:Approx:S.A.:C_Op::0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
+DEAL:0::
+DEAL:0::SOLVERS
+DEAL:0:Solvers:SolverBicgstab::Known AztecOO error in SolverBicgstab
+DEAL:0:Solvers:SolverCG:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCG:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverCG:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCG:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCG:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverCG:C_Op2::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCG:C_Op2::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCG:C_Op2::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverCGS:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCGS:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverCGS:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCGS:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCGS:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverCGS:C_Op2::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCGS:C_Op2::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverCGS:C_Op2::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverGMRES:S_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverGMRES:S_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverGMRES:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverGMRES:C_Op::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverGMRES:C_Op::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverGMRES:C_Op2::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverGMRES:C_Op2::Convergence step 1 value 0.000000000
+DEAL:0:Solvers:SolverGMRES:C_Op2::0.000000000 0.5000000000 1.000000000 1.500000000 2.000000000 2.500000000 3.000000000 3.500000000 4.000000000 4.500000000
+DEAL:0:Solvers:SolverTFQMR::Known AztecOO error in SolverTFQMR
+DEAL:0::TrilinosWrappers::SparseMatrix OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Tests solving a 2D Poisson equation with linear elements through
+// LinearOperator and inverse_operator wrappers for Trilinos matrices,
+// using a Trilinos direct solver to compute the reference solution.
+// Note: This test is a modified version of tests/trilinos/direct_solver.cc
+
+#include "../tests.h"
+
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+
+#include <fstream>
+#include <iomanip>
+
+
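+// Reduced variant of the step-4 Poisson problem, used only to generate a
+// linear system on which the different solution approaches are compared.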
+template <int dim>
+class Step4
+{
+public:
+ Step4 ();
+ void run ();
+
+private:
+ void make_grid ();
+ void setup_system();
+ void assemble_system ();
+ void solve ();
+
+ Triangulation<dim> triangulation;
+ FE_Q<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ ConstraintMatrix constraints;
+
+ TrilinosWrappers::SparseMatrix system_matrix;
+ TrilinosWrappers::Vector solution;
+ TrilinosWrappers::Vector system_rhs;
+};
+
+
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+ RightHandSide () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+ BoundaryValues () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+
+template <int dim>
+double RightHandSide<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ double return_value = 0;
+ for (unsigned int i=0; i<dim; ++i)
+ return_value += 4*std::pow(p(i), 4);
+
+ return return_value;
+}
+
+
+
+template <int dim>
+double BoundaryValues<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ return p.square();
+}
+
+
+
+template <int dim>
+Step4<dim>::Step4 ()
+ :
+ fe (1),
+ dof_handler (triangulation)
+{}
+
+
+template <int dim>
+void Step4<dim>::make_grid ()
+{
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (6);
+}
+
+
+
+template <int dim>
+void Step4<dim>::setup_system ()
+{
+ dof_handler.distribute_dofs (fe);
+
+ constraints.clear();
+ std::map<unsigned int,double> boundary_values;
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ BoundaryValues<dim>(),
+ constraints);
+ constraints.close();
+
+ DynamicSparsityPattern c_sparsity(dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (dof_handler, c_sparsity, constraints, false);
+ system_matrix.reinit (c_sparsity);
+
+ solution.reinit (dof_handler.n_dofs());
+ system_rhs.reinit (dof_handler.n_dofs());
+}
+
+
+template <int dim>
+void Step4<dim>::assemble_system ()
+{
+ QGauss<dim> quadrature_formula(fe.degree+1);
+
+ const RightHandSide<dim> right_hand_side;
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell!=endc; ++cell)
+ {
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point) *
+ fe_values.JxW (q_point));
+
+ cell_rhs(i) += (fe_values.shape_value (i, q_point) *
+ right_hand_side.value (fe_values.quadrature_point (q_point)) *
+ fe_values.JxW (q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix, cell_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+ system_matrix.compress(VectorOperation::add);
+}
+
+
+
+template <int dim>
+void Step4<dim>::solve ()
+{
+ // Compute 'reference' solution with direct solver
+ {
+ deallog.push("DirectKLU");
+ solution = 0;
+ TrilinosWrappers::SolverDirect::AdditionalData data;
+ data.solver_type = "Amesos_Klu";
+ SolverControl solver_control (1000, 1e-10);
+ TrilinosWrappers::SolverDirect solver(solver_control, data);
+ solver.solve (system_matrix, solution, system_rhs);
+ deallog.pop();
+ }
+
+ typedef TrilinosWrappers::Vector VectorType;
+ VectorType output(solution);
+ {
+ deallog.push("Trilinos_CG_SSOR");
+ output = 0;
+ SolverControl solver_control (1000, 1e-12);
+ TrilinosWrappers::SolverCG solver (solver_control);
+ TrilinosWrappers::PreconditionSSOR preconditioner;
+ preconditioner.initialize(system_matrix);
+ solver.solve (system_matrix, output, system_rhs,
+ preconditioner);
+ output -= solution;
+ deallog << "Norm of error in standard solve: " << output.l2_norm()
+ << std::endl;
+ deallog.pop();
+ }
+
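+  // Repeat the CG/SSOR solve, this time expressed through linear_operator
+  // and inverse_operator; the error against the direct-solver reference
+  // should match that of the standard solve above.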
+ {
+ deallog.push("LinearOperator_Trilinos_CG_SSOR");
+ output = 0;
+ SolverControl solver_control (1000, 1e-12);
+ TrilinosWrappers::SolverCG solver (solver_control);
+ TrilinosWrappers::PreconditionSSOR preconditioner;
+ preconditioner.initialize(system_matrix);
+ const auto lo_A = linear_operator<VectorType>(system_matrix);
+ const auto lo_A_inv = inverse_operator(lo_A, solver, preconditioner);
+ output = lo_A_inv*system_rhs;
+ output -= solution;
+ deallog << "Norm of error in LinearOperator solve: " << output.l2_norm()
+ << std::endl;
+ deallog.pop();
+ }
+
+}
+
+
+
+template <int dim>
+void Step4<dim>::run()
+{
+ make_grid();
+ setup_system();
+ assemble_system();
+ solve();
+}
+
+
+int main (int argc, char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.threshold_double(1.e-10);
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+ try
+ {
+ Step4<2> test;
+ test.run();
+ }
+ catch (std::exception &exc)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+
+DEAL:DirectKLU::Starting value 0
+DEAL:DirectKLU::Convergence step 0 value 0
+DEAL:Trilinos_CG_SSOR::Convergence step 86 value 0
+DEAL:Trilinos_CG_SSOR::Norm of error in standard solve: 0
+DEAL:LinearOperator_Trilinos_CG_SSOR::Convergence step 86 value 0
+DEAL:LinearOperator_Trilinos_CG_SSOR::Norm of error in LinearOperator solve: 0
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2013 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Tests solving a 2D Poisson equation (linear elements, distributed
+// triangulation) through LinearOperator and inverse_operator wrappers for
+// Trilinos matrices, using a Trilinos direct solver to compute the
+// reference solution.
+// Note: This test is a modified version of tests/trilinos/direct_solver_2.cc
+
+#include "../tests.h"
+
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+
+#include <deal.II/base/function.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+#include <iomanip>
+
+using namespace dealii;
+
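+// Parallel (distributed-triangulation) variant of the Step4 problem used in
+// the previous test.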
+template <int dim>
+class Step4
+{
+public:
+ Step4 ();
+ void run ();
+
+private:
+ void make_grid ();
+ void setup_system();
+ void assemble_system ();
+ void solve ();
+
+ parallel::distributed::Triangulation<dim> triangulation;
+ FE_Q<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ ConstraintMatrix constraints;
+ SparsityPattern sparsity_pattern;
+
+ TrilinosWrappers::SparseMatrix system_matrix;
+
+ TrilinosWrappers::MPI::Vector solution;
+ TrilinosWrappers::MPI::Vector system_rhs;
+};
+
+
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+ RightHandSide () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+template <int dim>
+class RightHandSideTwo : public Function<dim>
+{
+public:
+ RightHandSideTwo () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+ BoundaryValues () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+
+template <int dim>
+double RightHandSide<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ double return_value = 0;
+ for (unsigned int i=0; i<dim; ++i)
+ return_value += 2*std::pow(p(i), 2);
+
+ return return_value;
+}
+
+
+template <int dim>
+double RightHandSideTwo<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ double return_value = 0;
+ for (unsigned int i=0; i<dim; ++i)
+ return_value += 4*std::pow(p(i), 4);
+
+ return return_value;
+}
+
+
+template <int dim>
+double BoundaryValues<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ return p.square();
+}
+
+
+
+template <int dim>
+Step4<dim>::Step4 ()
+ :
+ triangulation(MPI_COMM_WORLD,
+ typename Triangulation<dim>::MeshSmoothing
+ (Triangulation<dim>::smoothing_on_refinement |
+ Triangulation<dim>::smoothing_on_coarsening)),
+ fe (1),
+ dof_handler (triangulation)
+{}
+
+
+template <int dim>
+void Step4<dim>::make_grid ()
+{
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (6);
+}
+
+
+
+template <int dim>
+void Step4<dim>::setup_system ()
+{
+ dof_handler.distribute_dofs (fe);
+
+ constraints.clear();
+ std::map<unsigned int,double> boundary_values;
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ BoundaryValues<dim>(),
+ constraints);
+ constraints.close();
+
+ IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
+
+ DoFTools::extract_locally_relevant_dofs(dof_handler,
+ locally_relevant_dofs);
+
+
+ DynamicSparsityPattern dsp(dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (dof_handler, dsp, constraints, false);
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ MPI_COMM_WORLD,
+ locally_relevant_dofs);
+
+ system_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ dsp,
+ MPI_COMM_WORLD);
+
+ solution.reinit (locally_relevant_dofs,
+ MPI_COMM_WORLD);
+
+ system_rhs.reinit (locally_owned_dofs,
+ locally_relevant_dofs,
+ MPI_COMM_WORLD,
+ true);
+
+}
+
+
+template <int dim>
+void Step4<dim>::assemble_system ()
+{
+ QGauss<dim> quadrature_formula(fe.degree+1);
+
+ const RightHandSide<dim> right_hand_side;
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell!=endc; ++cell)
+ {
+ if (cell->is_locally_owned())
+ {
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point) *
+ fe_values.JxW (q_point));
+
+ cell_rhs(i) += (fe_values.shape_value (i, q_point) *
+ right_hand_side.value (fe_values.quadrature_point (q_point)) *
+ fe_values.JxW (q_point));
+
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix, cell_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+ }
+ system_matrix.compress(VectorOperation::add);
+ system_rhs.compress(VectorOperation::add);
+}
+
+
+
+template <int dim>
+void Step4<dim>::solve ()
+{
+ typedef TrilinosWrappers::MPI::Vector VectorType;
+
+ // Compute 'reference' solution with direct solver
+ VectorType temp_solution(system_rhs);
+ {
+ deallog.push("DirectKLU");
+ temp_solution = 0;
+ TrilinosWrappers::SolverDirect::AdditionalData data;
+ data.solver_type = "Amesos_Klu";
+ SolverControl solver_control (1000, 1e-10);
+ TrilinosWrappers::SolverDirect solver(solver_control, data);
+ solver.solve (system_matrix, temp_solution, system_rhs);
+ constraints.distribute(temp_solution);
+ solution = temp_solution;
+ deallog.pop();
+ }
+
+ VectorType output(system_rhs);
+ {
+ deallog.push("Trilinos_CG_SSOR");
+ output = 0;
+ SolverControl solver_control (1000, 1e-12);
+ TrilinosWrappers::SolverCG solver (solver_control);
+ TrilinosWrappers::PreconditionSSOR preconditioner;
+ preconditioner.initialize(system_matrix);
+ solver.solve (system_matrix, output, system_rhs,
+ preconditioner);
+ constraints.distribute(output);
+ output -= temp_solution;
+ const double local_error = output.l2_norm();
+ const double global_error = std::sqrt(Utilities::MPI::sum(
+ local_error * local_error,
+ MPI_COMM_WORLD));
+ deallog << "Norm of error in standard solve: " << global_error
+ << std::endl;
+ deallog.pop();
+ }
+
+ {
+ deallog.push("LinearOperator_Trilinos_CG_SSOR");
+ output = 0;
+ SolverControl solver_control (1000, 1e-12);
+ TrilinosWrappers::SolverCG solver (solver_control);
+ TrilinosWrappers::PreconditionSSOR preconditioner;
+ preconditioner.initialize(system_matrix);
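+ // The Trilinos matrix is wrapped in a LinearOperator (carrying the new
+ // TrilinosPayload), and inverse_operator() packages the CG solver and the
+ // preconditioner so that applying lo_A_inv to system_rhs performs the solve.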
+ const auto lo_A = linear_operator<VectorType>(system_matrix);
+ const auto lo_A_inv = inverse_operator(lo_A, solver, preconditioner);
+ output = lo_A_inv*system_rhs;
+ constraints.distribute(output);
+ output -= temp_solution;
+ const double local_error = output.l2_norm();
+ const double global_error = std::sqrt(Utilities::MPI::sum(
+ local_error * local_error,
+ MPI_COMM_WORLD));
+ deallog << "Norm of error in LinearOperator solve: " << global_error
+ << std::endl;
+ deallog.pop();
+ }
+
+}
+
+
+
+template <int dim>
+void Step4<dim>::run()
+{
+ make_grid();
+ setup_system();
+ assemble_system();
+ solve();
+}
+
+
+int main (int argc, char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.threshold_double(1.e-10);
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+ try
+ {
+ Step4<2> test;
+ test.run();
+ }
+ catch (std::exception &exc)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ }
+}
--- /dev/null
+
+DEAL:DirectKLU::Starting value 0
+DEAL:DirectKLU::Convergence step 0 value 0
+DEAL:Trilinos_CG_SSOR::Convergence step 86 value 0
+DEAL:Trilinos_CG_SSOR::Norm of error in standard solve: 0
+DEAL:LinearOperator_Trilinos_CG_SSOR::Convergence step 86 value 0
+DEAL:LinearOperator_Trilinos_CG_SSOR::Norm of error in LinearOperator solve: 0
--- /dev/null
+
+DEAL:DirectKLU::Starting value 0
+DEAL:DirectKLU::Convergence step 0 value 0
+DEAL:Trilinos_CG_SSOR::Convergence step 105 value 0
+DEAL:Trilinos_CG_SSOR::Norm of error in standard solve: 0
+DEAL:LinearOperator_Trilinos_CG_SSOR::Convergence step 105 value 0
+DEAL:LinearOperator_Trilinos_CG_SSOR::Norm of error in LinearOperator solve: 0
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test TrilinosPayload vmult and Tvmult operations for serial vectors,
+// specifically under conditions where the transpose flag is set
+
+#include "../tests.h"
+
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_block_vector.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+
+using namespace dealii;
+
+template<int dim>
+void build_matrix_vector(TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::BlockVector &vector,
+ const FE_Q<dim> &fe_test,
+ const FE_Q<dim> &fe_trial)
+{
+ deallog.push("build_matrix_vector");
+
+ // Configure block system
+ std::vector<types::global_dof_index> dofs_per_block(2);
+ std::vector<unsigned int> block_component (2);
+ block_component[0] = 0;
+ block_component[1] = 1;
+
+ // Initialise
+ const FESystem<dim> fe (fe_test,1,fe_trial,1);
+ Triangulation<dim> triangulation;
+ QGauss<dim> quadrature_formula(fe_trial.degree+1);
+ DoFHandler<dim> dof_handler (triangulation);
+ ConstraintMatrix constraints;
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+
+ // Make grid
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (2);
+
+ // Setup system
+ dof_handler.distribute_dofs (fe);
+ DoFRenumbering::component_wise(dof_handler, block_component);
+ DoFTools::count_dofs_per_block(dof_handler, dofs_per_block,
+ block_component);
+
+ constraints.clear();
+ constraints.close();
+ BlockDynamicSparsityPattern dsp (dofs_per_block,dofs_per_block);
+ DoFTools::make_sparsity_pattern (dof_handler, dsp, constraints, false);
+
+ matrix.clear();
+ // vector.clear();
+ matrix.reinit (dsp);
+ vector.reinit (dofs_per_block);
+
+ // Assemble system: mass matrix and constant RHS vector
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_JxW_values);
+ const unsigned int n_q_points = quadrature_formula.size();
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ {
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ // Globally symmetric contributions, but the off-diagonal
+ // blocks are non-square. This is useful for checking the
+ // implementation of the transpose operator.
+ cell_matrix(i,j) += (fe_values.shape_value (i, q_point) *
+ fe_values.shape_value (j, q_point) *
+ fe_values.JxW (q_point));
+
+ // Non-trivial vector contribution
+ cell_rhs(i) += (fe_values.shape_value (i, q_point) * 1.0 *
+ fe_values.JxW (q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix, cell_rhs,
+ local_dof_indices,
+ matrix, vector);
+ }
+ matrix.compress(VectorOperation::add);
+ vector.compress(VectorOperation::add);
+
+ deallog.pop();
+}
+
+void evaluate_ops (const TrilinosWrappers::BlockSparseMatrix &matrix,
+ const TrilinosWrappers::BlockVector &vector)
+{
+ const double tol = 1e-12;
+ typedef dealii::TrilinosWrappers::SparseMatrix MatrixType;
+ typedef dealii::TrilinosWrappers::Vector VectorType;
+ typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadType;
+ typedef typename PayloadType::VectorType PayloadVectorType;
+ typedef dealii::types::global_dof_index size_type;
+
+ deallog.push("System info");
+ {
+ deallog
+ << "Matrix frobenius norm" << std::endl
+ << "block(0,0): " << matrix.block(0,0).frobenius_norm() << std::endl
+ << "block(0,1): " << matrix.block(0,1).frobenius_norm() << std::endl
+ << "block(1,0): " << matrix.block(1,0).frobenius_norm() << std::endl
+ << "block(1,1): " << matrix.block(1,1).frobenius_norm() << std::endl
+ << "Vector L1 norm" << std::endl
+ << "block(0): " << vector.block(0).l1_norm() << std::endl
+ << "block(1): " << vector.block(1).l1_norm() << std::endl
+ << "Vector L2 norm" << std::endl
+ << "block(0): " << vector.block(0).l2_norm() << std::endl
+ << "block(1): " << vector.block(1).l2_norm() << std::endl
+ << "Vector Linfty norm" << std::endl
+ << "block(0): " << vector.block(0).linfty_norm() << std::endl
+ << "block(1): " << vector.block(1).linfty_norm() << std::endl
+ << std::endl;
+ }
+ deallog.pop();
+
+ deallog.push("Matrix TW::Vector");
+ {
+ {
+ deallog.push("vmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(0);
+ const VectorType &r = vector.block(1);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+
+ // First test the standard operation to get a reference result
+ A.vmult(out_ref,b);
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ out_lo = lo_A*b; // lo_A.vmult(out_lo,b);
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff1 = out_lo-out_ref;
+ Assert(std::sqrt(diff1.norm_sqr()) < tol,
+ ExcMessage("LinearOperator vmult operation does not match reference result"));
+
+ // Lastly we test the functionality added by the Payload.
+ // These Trilinos vectors are merely sets of pointers to the elements
+ // of the deal.II vectors, and so do not own their own elements.
+ // Any operation performed on them therefore acts directly on the
+ // deal.II vectors themselves.
+ // For how the Epetra_MultiVectors are initialised, see
+ // TrilinosWrappers::SparseMatrix::vmult
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A.UseTranspose()
+ << std::endl;
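+ // Apply(), UseTranspose() and the Operator{Domain,Range}Map() calls used
+ // here are part of the Epetra_Operator interface exposed by the payload,
+ // which is what allows a LinearOperator to be handed to Trilinos solvers.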
+ lo_A.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload vmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+
+ {
+ deallog.push("Tvmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(1);
+ const VectorType &r = vector.block(0);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ // First test the standard operation to get a reference result
+ A.Tvmult(out_ref,b);
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ const auto lo_A_T = transpose_operator<VectorType,VectorType,PayloadType>(A);
+ out_lo = lo_A_T*b; // lo_A.Tvmult(out_lo,b);
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff1 = out_lo-out_ref;
+ Assert(std::sqrt(diff1.norm_sqr()) < tol,
+ ExcMessage("LinearOperator Tvmult operation does not match reference result"));
+
+ // Lastly we test functionality added by the Payload
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A_T.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A_T.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A_T.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A_T.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A_T.UseTranspose()
+ << std::endl;
+ lo_A_T.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload Tvmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+
+ {
+ deallog.push("Composite vmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(0);
+ const VectorType &r = vector.block(0);
+ const VectorType &i = vector.block(1);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ // First test the standard operation to get a reference result
+ {
+ VectorType int_lo_pyld (i); //intermediate solution
+ A.vmult(int_lo_pyld,b);
+ A.Tvmult(out_ref,int_lo_pyld);
+ }
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+ const auto lo_A_T = transpose_operator<VectorType,VectorType,PayloadType>(A);
+ out_lo = (lo_A_T*lo_A)*b;
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff1 = out_lo-out_ref;
+ Assert(std::sqrt(diff1.norm_sqr()) < tol,
+ ExcMessage("LinearOperator composite vmult operation does not match reference result"));
+
+ // Lastly we test functionality added by the Payload
+ const auto lo_A_T_x_lo_A = lo_A_T*lo_A; // Construct composite operator
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A_T_x_lo_A.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A_T_x_lo_A.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A_T_x_lo_A.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A_T_x_lo_A.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A_T_x_lo_A.UseTranspose()
+ << std::endl;
+ lo_A_T_x_lo_A.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload composite vmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+
+ {
+ deallog.push("Composite mult Tvmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(1);
+ const VectorType &r = vector.block(1);
+ const VectorType &i = vector.block(0);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ // First test the standard operation to get a reference result
+ {
+ VectorType int_lo_pyld (i); //intermediate
+ A.Tvmult(int_lo_pyld,b);
+ A.vmult(out_ref,int_lo_pyld);
+ }
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+ const auto lo_A_T = transpose_operator<VectorType,VectorType,PayloadType>(A);
+ out_lo = (lo_A*lo_A_T)*b;
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff = out_lo-out_ref;
+ Assert(std::sqrt(diff.norm_sqr()) < tol,
+ ExcMessage("LinearOperator composite Tvmult operation does not match reference result"));
+
+ // Lastly we test functionality added by the Payload
+ const auto lo_A_x_lo_A_T = transpose_operator(lo_A*lo_A_T); // Construct composite operator
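+ // Since A*A^T is symmetric, transposing the composite operator must give
+ // the same result; this exercises the transpose flag of the payload.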
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A_x_lo_A_T.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A_x_lo_A_T.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A_x_lo_A_T.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A_x_lo_A_T.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A_x_lo_A_T.UseTranspose()
+ << std::endl;
+ lo_A_x_lo_A_T.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload composite vmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+ }
+ deallog.pop();
+ deallog << "Matrix TW::Vector OK" << std::endl;
+}
+
+int main(int argc, char *argv[])
+{
+ const int dim = 2;
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.depth_console(0);
+ deallog << std::setprecision(10);
+
+ FE_Q<dim> fe_test_1 (1);
+ FE_Q<dim> fe_test_2 (2);
+ FE_Q<dim> fe_trial (1);
+
+ TrilinosWrappers::BlockSparseMatrix A;
+ TrilinosWrappers::BlockVector b;
+
+ deallog.push("Square");
+ build_matrix_vector<dim>(A,b,fe_test_1,fe_trial);
+ evaluate_ops(A,b);
+ deallog.pop();
+
+ deallog << std::endl << std::endl;
+
+ deallog.push("Non-square");
+ build_matrix_vector<dim>(A,b,fe_test_2,fe_trial);
+ evaluate_ops(A,b);
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:Square:System info::Matrix frobenius norm
+DEAL:0:Square:System info::block(0,0): 0.4444444444
+DEAL:0:Square:System info::block(0,1): 0.4444444444
+DEAL:0:Square:System info::block(1,0): 0.4444444444
+DEAL:0:Square:System info::block(1,1): 0.4444444444
+DEAL:0:Square:System info::Vector L1 norm
+DEAL:0:Square:System info::block(0): 4.000000000
+DEAL:0:Square:System info::block(1): 4.000000000
+DEAL:0:Square:System info::Vector L2 norm
+DEAL:0:Square:System info::block(0): 0.8750000000
+DEAL:0:Square:System info::block(1): 0.8750000000
+DEAL:0:Square:System info::Vector Linfty norm
+DEAL:0:Square:System info::block(0): 0.2500000000
+DEAL:0:Square:System info::block(1): 0.2500000000
+DEAL:0:Square:System info::
+DEAL:0:Square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:vmult::Reference result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:0:Square::Matrix TW::Vector OK
+DEAL:0::
+DEAL:0::
+DEAL:0:Non-square:System info::Matrix frobenius norm
+DEAL:0:Non-square:System info::block(0,0): 0.2962962963
+DEAL:0:Non-square:System info::block(0,1): 0.3194444444
+DEAL:0:Non-square:System info::block(1,0): 0.3194444444
+DEAL:0:Non-square:System info::block(1,1): 0.4444444444
+DEAL:0:Non-square:System info::Vector L1 norm
+DEAL:0:Non-square:System info::block(0): 4.000000000
+DEAL:0:Non-square:System info::block(1): 4.000000000
+DEAL:0:Non-square:System info::Vector L2 norm
+DEAL:0:Non-square:System info::block(0): 0.5416666667
+DEAL:0:Non-square:System info::block(1): 0.8750000000
+DEAL:0:Non-square:System info::Vector Linfty norm
+DEAL:0:Non-square:System info::block(0): 0.1111111111
+DEAL:0:Non-square:System info::block(1): 0.2500000000
+DEAL:0:Non-square:System info::
+DEAL:0:Non-square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 25
+DEAL:0:Non-square:Matrix TW::Vector:vmult::Reference result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 81
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 81
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 25
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.0001864259953
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.0001864259953
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.0001864259953
+DEAL:0:Non-square::Matrix TW::Vector OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test TrilinosPayload vmult and Tvmult operations for MPI vectors,
+// specifically under conditions where the transpose flag is set
+// This is the parallel version of linear_operator_13.cc
+
+#include "../tests.h"
+
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/base/function.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_block_vector.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+
+using namespace dealii;
+
+template<int dim>
+void build_matrix_vector(TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::MPI::BlockVector &vector,
+ const FE_Q<dim> &fe_test,
+ const FE_Q<dim> &fe_trial)
+{
+ deallog.push("build_matrix_vector");
+
+ MPI_Comm mpi_communicator (MPI_COMM_WORLD);
+
+ // Configure block system
+ // Block data
+ const unsigned int n_blocks = 2;
+ std::vector<unsigned int> block_component (n_blocks);
+ block_component[0] = 0;
+ block_component[1] = 1;
+ std::vector<types::global_dof_index> dofs_per_block(n_blocks);
+
+ // DoF index data
+ std::vector<IndexSet> all_locally_owned_dofs;
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
+ std::vector<IndexSet> locally_owned_partitioning;
+ std::vector<IndexSet> locally_relevant_partitioning;
+
+ // Initialise
+ const FESystem<dim> fe (fe_test,1,fe_trial,1);
+ parallel::distributed::Triangulation<dim> triangulation (mpi_communicator,
+ typename Triangulation<dim>::MeshSmoothing
+ (Triangulation<dim>::smoothing_on_refinement |
+ Triangulation<dim>::smoothing_on_coarsening));
+ QGauss<dim> quadrature_formula(fe_trial.degree+1);
+ DoFHandler<dim> dof_handler (triangulation);
+ ConstraintMatrix constraints;
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+
+ // Make grid
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (2);
+
+ // Setup system
+ dof_handler.distribute_dofs (fe);
+ DoFRenumbering::component_wise(dof_handler, block_component);
+ DoFTools::count_dofs_per_block(dof_handler, dofs_per_block,
+ block_component);
+
+ locally_owned_dofs = dof_handler.locally_owned_dofs();
+ locally_owned_partitioning.push_back(locally_owned_dofs.get_view(0,dofs_per_block[0]));
+ locally_owned_partitioning.push_back(locally_owned_dofs.get_view(dofs_per_block[0],
+ dofs_per_block[0]+dofs_per_block[1]));
+ DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+ locally_relevant_partitioning.push_back(locally_relevant_dofs.get_view(0,dofs_per_block[0]));
+ locally_relevant_partitioning.push_back(locally_relevant_dofs.get_view(dofs_per_block[0],
+ dofs_per_block[0]+dofs_per_block[1]));
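+ // Because the DoFs were renumbered block-wise above, the per-block index
+ // sets can be taken as contiguous views of the monolithic owned/relevant
+ // index sets.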
+
+ constraints.clear();
+ constraints.reinit(locally_relevant_dofs);
+ constraints.close();
+
+ // See https://www.dealii.org/developer/doxygen/deal.II/step_32.html#TheBoussinesqFlowProblemsetupfunctions
+ Table<2,DoFTools::Coupling> coupling (n_blocks,n_blocks);
+ coupling.fill(DoFTools::always);
+ TrilinosWrappers::BlockSparsityPattern dsp (locally_owned_partitioning,
+ locally_owned_partitioning,
+ locally_relevant_partitioning,
+ mpi_communicator);
+ DoFTools::make_sparsity_pattern (dof_handler,
+ coupling, dsp,
+ constraints, false,
+ Utilities::MPI::this_mpi_process(mpi_communicator));
+ dsp.compress();
+
+ matrix.clear();
+ matrix.reinit (dsp);
+ vector.reinit (locally_owned_partitioning,
+ mpi_communicator);
+
+ // Assemble system: mass matrix and constant RHS vector
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_JxW_values);
+ const unsigned int n_q_points = quadrature_formula.size();
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ {
+ if (cell->is_locally_owned() == false) continue;
+
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ // Globally symmetric contributions, but the off-diagonal
+ // blocks are non-square. This is useful for checking the
+ // implementation of the transpose operator.
+ cell_matrix(i,j) += (fe_values.shape_value (i, q_point) *
+ fe_values.shape_value (j, q_point) *
+ fe_values.JxW (q_point));
+
+ // Non-trivial vector contribution
+ cell_rhs(i) += (fe_values.shape_value (i, q_point) * 1.0 *
+ fe_values.JxW (q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix, cell_rhs,
+ local_dof_indices,
+ matrix, vector);
+ }
+ matrix.compress(VectorOperation::add);
+ vector.compress(VectorOperation::add);
+
+ deallog.pop();
+}
+
+void evaluate_ops (const TrilinosWrappers::BlockSparseMatrix &matrix,
+ const TrilinosWrappers::MPI::BlockVector &vector)
+{
+ const double tol = 1e-12;
+ typedef dealii::TrilinosWrappers::SparseMatrix MatrixType;
+ typedef dealii::TrilinosWrappers::MPI::Vector VectorType;
+ typedef dealii::TrilinosWrappers::internal::LinearOperator::TrilinosPayload PayloadType;
+ typedef typename PayloadType::VectorType PayloadVectorType;
+ typedef dealii::types::global_dof_index size_type;
+
+ deallog.push("System info");
+ {
+ deallog
+ << "Matrix frobenius norm" << std::endl
+ << "block(0,0): " << matrix.block(0,0).frobenius_norm() << std::endl
+ << "block(0,1): " << matrix.block(0,1).frobenius_norm() << std::endl
+ << "block(1,0): " << matrix.block(1,0).frobenius_norm() << std::endl
+ << "block(1,1): " << matrix.block(1,1).frobenius_norm() << std::endl
+ << "Vector L1 norm" << std::endl
+ << "block(0): " << vector.block(0).l1_norm() << std::endl
+ << "block(1): " << vector.block(1).l1_norm() << std::endl
+ << "Vector L2 norm" << std::endl
+ << "block(0): " << vector.block(0).l2_norm() << std::endl
+ << "block(1): " << vector.block(1).l2_norm() << std::endl
+ << "Vector Linfty norm" << std::endl
+ << "block(0): " << vector.block(0).linfty_norm() << std::endl
+ << "block(1): " << vector.block(1).linfty_norm() << std::endl
+ << std::endl;
+ }
+ deallog.pop();
+
+ deallog.push("Matrix TW::Vector");
+ {
+ {
+ deallog.push("vmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(0);
+ const VectorType &r = vector.block(1);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+
+ // First test the standard operation to get a reference result
+ A.vmult(out_ref,b);
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ out_lo = lo_A*b; // lo_A.vmult(out_lo,b);
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff1 = out_lo-out_ref;
+ Assert(std::sqrt(diff1.norm_sqr()) < tol,
+ ExcMessage("LinearOperator vmult operation does not match reference result"));
+
+ // Lastly we test the functionality added by the Payload.
+ // These Trilinos vectors are merely sets of pointers to the elements
+ // of the deal.II vectors, and so do not own their own elements.
+ // Any operation performed on them therefore acts directly on the
+ // deal.II vectors themselves.
+ // For how the Epetra_MultiVectors are initialised, see
+ // TrilinosWrappers::SparseMatrix::vmult
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A.UseTranspose()
+ << std::endl;
+ lo_A.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload vmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+
+ {
+ deallog.push("Tvmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(1);
+ const VectorType &r = vector.block(0);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ // First test the standard operation to get a reference result
+ A.Tvmult(out_ref,b);
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ const auto lo_A_T = transpose_operator<VectorType,VectorType,PayloadType>(A);
+ out_lo = lo_A_T*b; // lo_A.Tvmult(out_lo,b);
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff1 = out_lo-out_ref;
+ Assert(std::sqrt(diff1.norm_sqr()) < tol,
+ ExcMessage("LinearOperator Tvmult operation does not match reference result"));
+
+ // Lastly we test functionality added by the Payload
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A_T.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A_T.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A_T.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A_T.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A_T.UseTranspose()
+ << std::endl;
+ lo_A_T.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload Tvmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+
+ {
+ deallog.push("Composite vmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(0);
+ const VectorType &r = vector.block(0);
+ const VectorType &i = vector.block(1);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ // First test the standard operation to get a reference result
+ {
+ VectorType int_lo_pyld (i); //intermediate solution
+ A.vmult(int_lo_pyld,b);
+ A.Tvmult(out_ref,int_lo_pyld);
+ }
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+ const auto lo_A_T = transpose_operator<VectorType,VectorType,PayloadType>(A);
+ out_lo = (lo_A_T*lo_A)*b;
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff1 = out_lo-out_ref;
+ Assert(std::sqrt(diff1.norm_sqr()) < tol,
+ ExcMessage("LinearOperator composite vmult operation does not match reference result"));
+
+ // Lastly we test functionality added by the Payload
+ const auto lo_A_T_x_lo_A = lo_A_T*lo_A; // Construct composite operator
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A_T_x_lo_A.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A_T_x_lo_A.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A_T_x_lo_A.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A_T_x_lo_A.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A_T_x_lo_A.UseTranspose()
+ << std::endl;
+ lo_A_T_x_lo_A.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload composite vmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+
+ {
+ deallog.push("Composite mult Tvmult");
+
+ const MatrixType &A = matrix.block(1,0);
+ const VectorType &b = vector.block(1);
+ const VectorType &r = vector.block(1);
+ const VectorType &i = vector.block(0);
+ Assert(A.frobenius_norm() > 0.0, ExcInternalError());
+ Assert(b.l2_norm() > 0.0, ExcInternalError());
+ deallog
+ << "System size: "
+ << " A.m(): " << A.m()
+ << " A.n(): " << A.n()
+ << " b.size(): " << b.size()
+ << " r.size(): " << r.size()
+ << std::endl;
+
+ VectorType out_ref (r);
+ VectorType out_lo (r);
+ VectorType out_lo_pyld (r);
+
+ // First test the standard operation to get a reference result
+ {
+ VectorType int_lo_pyld (i); //intermediate
+ A.Tvmult(int_lo_pyld,b);
+ A.vmult(out_ref,int_lo_pyld);
+ }
+ deallog << "Reference result norm squared: " << out_ref.norm_sqr() << std::endl;
+
+ // Next we check the native operation of the LinearOperator
+ const auto lo_A = linear_operator<VectorType,VectorType,PayloadType>(A);
+ const auto lo_A_T = transpose_operator<VectorType,VectorType,PayloadType>(A);
+ out_lo = (lo_A*lo_A_T)*b;
+ deallog << "LinearOperator result norm squared: " << out_lo.norm_sqr() << std::endl;
+ const VectorType diff = out_lo-out_ref;
+ Assert(std::sqrt(diff.norm_sqr()) < tol,
+ ExcMessage("LinearOperator composite Tvmult operation does not match reference result"));
+
+ // Lastly we test functionality added by the Payload
+ const auto lo_A_x_lo_A_T = transpose_operator(lo_A*lo_A_T); // Construct composite operator
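+ // As in the serial test: A*A^T is symmetric, so the transposed composite
+ // operator must reproduce the same result while the transpose flag is set.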
+ out_lo_pyld = 0.0;
+ const size_type o_local_size = out_lo_pyld.end() - out_lo_pyld.begin();
+ AssertDimension (o_local_size, static_cast<size_type>(lo_A_x_lo_A_T.OperatorRangeMap().NumMyPoints()));
+ PayloadVectorType tril_out_lo_pyld (View, lo_A_x_lo_A_T.OperatorRangeMap(),
+ const_cast<TrilinosScalar *>(out_lo_pyld.begin()),
+ o_local_size, 1);
+ const size_type b_local_size = b.end() - b.begin();
+ AssertDimension (b_local_size, static_cast<size_type>(lo_A_x_lo_A_T.OperatorDomainMap().NumMyPoints()));
+ PayloadVectorType tril_b_pyld (View, lo_A_x_lo_A_T.OperatorDomainMap(),
+ const_cast<TrilinosScalar *>(b.begin()),
+ b_local_size, 1);
+
+ deallog
+ << "LinearOperator status:"
+ << " UseTranspose: " << lo_A_x_lo_A_T.UseTranspose()
+ << std::endl;
+ lo_A_x_lo_A_T.Apply(tril_b_pyld,tril_out_lo_pyld);
+ deallog << "LinearOperator payload result norm squared: " << out_lo_pyld.norm_sqr() << std::endl;
+ const VectorType diff2 = out_lo_pyld-out_ref;
+ Assert(std::sqrt(diff2.norm_sqr()) < tol,
+ ExcMessage("LinearOperator payload composite vmult operation does not match reference result"));
+
+ deallog.pop();
+ }
+ }
+ deallog.pop();
+ deallog << "Matrix TW::Vector OK" << std::endl;
+}
+
+int main(int argc, char *argv[])
+{
+ const int dim = 2;
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.depth_console(0);
+ deallog << std::setprecision(10);
+
+ FE_Q<dim> fe_test_1 (1);
+ FE_Q<dim> fe_test_2 (2);
+ FE_Q<dim> fe_trial (1);
+
+ TrilinosWrappers::BlockSparseMatrix A;
+ TrilinosWrappers::MPI::BlockVector b;
+
+ deallog.push("Square");
+ build_matrix_vector<dim>(A,b,fe_test_1,fe_trial);
+ evaluate_ops(A,b);
+ deallog.pop();
+
+ deallog << std::endl << std::endl;
+
+ deallog.push("Non-square");
+ build_matrix_vector<dim>(A,b,fe_test_2,fe_trial);
+ evaluate_ops(A,b);
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:Square:System info::Matrix frobenius norm
+DEAL:0:Square:System info::block(0,0): 0.4444444444
+DEAL:0:Square:System info::block(0,1): 0.4444444444
+DEAL:0:Square:System info::block(1,0): 0.4444444444
+DEAL:0:Square:System info::block(1,1): 0.4444444444
+DEAL:0:Square:System info::Vector L1 norm
+DEAL:0:Square:System info::block(0): 4.000000000
+DEAL:0:Square:System info::block(1): 4.000000000
+DEAL:0:Square:System info::Vector L2 norm
+DEAL:0:Square:System info::block(0): 0.8750000000
+DEAL:0:Square:System info::block(1): 0.8750000000
+DEAL:0:Square:System info::Vector Linfty norm
+DEAL:0:Square:System info::block(0): 0.2500000000
+DEAL:0:Square:System info::block(1): 0.2500000000
+DEAL:0:Square:System info::
+DEAL:0:Square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:vmult::Reference result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:0:Square::Matrix TW::Vector OK
+DEAL:0::
+DEAL:0::
+DEAL:0:Non-square:System info::Matrix frobenius norm
+DEAL:0:Non-square:System info::block(0,0): 0.2962962963
+DEAL:0:Non-square:System info::block(0,1): 0.3194444444
+DEAL:0:Non-square:System info::block(1,0): 0.3194444444
+DEAL:0:Non-square:System info::block(1,1): 0.4444444444
+DEAL:0:Non-square:System info::Vector L1 norm
+DEAL:0:Non-square:System info::block(0): 4.000000000
+DEAL:0:Non-square:System info::block(1): 4.000000000
+DEAL:0:Non-square:System info::Vector L2 norm
+DEAL:0:Non-square:System info::block(0): 0.5416666667
+DEAL:0:Non-square:System info::block(1): 0.8750000000
+DEAL:0:Non-square:System info::Vector Linfty norm
+DEAL:0:Non-square:System info::block(0): 0.1111111111
+DEAL:0:Non-square:System info::block(1): 0.2500000000
+DEAL:0:Non-square:System info::
+DEAL:0:Non-square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 25
+DEAL:0:Non-square:Matrix TW::Vector:vmult::Reference result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 81
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 81
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 25
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.0001864259953
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.0001864259953
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.0001864259953
+DEAL:0:Non-square::Matrix TW::Vector OK
--- /dev/null
+
+DEAL:0:Square:System info::Matrix frobenius norm
+DEAL:0:Square:System info::block(0,0): 0.4444444444
+DEAL:0:Square:System info::block(0,1): 0.4444444444
+DEAL:0:Square:System info::block(1,0): 0.4444444444
+DEAL:0:Square:System info::block(1,1): 0.4444444444
+DEAL:0:Square:System info::Vector L1 norm
+DEAL:0:Square:System info::block(0): 4.000000000
+DEAL:0:Square:System info::block(1): 4.000000000
+DEAL:0:Square:System info::Vector L2 norm
+DEAL:0:Square:System info::block(0): 0.8750000000
+DEAL:0:Square:System info::block(1): 0.8750000000
+DEAL:0:Square:System info::Vector Linfty norm
+DEAL:0:Square:System info::block(0): 0.2500000000
+DEAL:0:Square:System info::block(1): 0.2500000000
+DEAL:0:Square:System info::
+DEAL:0:Square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:vmult::Reference result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.001493556346
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:0:Square::Matrix TW::Vector OK
+DEAL:0::
+DEAL:0::
+DEAL:0:Non-square:System info::Matrix frobenius norm
+DEAL:0:Non-square:System info::block(0,0): 0.2962962963
+DEAL:0:Non-square:System info::block(0,1): 0.3194444444
+DEAL:0:Non-square:System info::block(1,0): 0.3194444444
+DEAL:0:Non-square:System info::block(1,1): 0.4444444444
+DEAL:0:Non-square:System info::Vector L1 norm
+DEAL:0:Non-square:System info::block(0): 4.000000000
+DEAL:0:Non-square:System info::block(1): 4.000000000
+DEAL:0:Non-square:System info::Vector L2 norm
+DEAL:0:Non-square:System info::block(0): 0.5416666667
+DEAL:0:Non-square:System info::block(1): 0.8750000000
+DEAL:0:Non-square:System info::Vector Linfty norm
+DEAL:0:Non-square:System info::block(0): 0.1111111111
+DEAL:0:Non-square:System info::block(1): 0.2500000000
+DEAL:0:Non-square:System info::
+DEAL:0:Non-square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 25
+DEAL:0:Non-square:Matrix TW::Vector:vmult::Reference result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Non-square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.004314239460
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 81
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Non-square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.01177375699
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 81
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:0:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 6.726708649e-05
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 25
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.0001864259953
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.0001864259953
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:0:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.0001864259953
+DEAL:0:Non-square::Matrix TW::Vector OK
+
+DEAL:1:Square:System info::Matrix frobenius norm
+DEAL:1:Square:System info::block(0,0): 0.4444444444
+DEAL:1:Square:System info::block(0,1): 0.4444444444
+DEAL:1:Square:System info::block(1,0): 0.4444444444
+DEAL:1:Square:System info::block(1,1): 0.4444444444
+DEAL:1:Square:System info::Vector L1 norm
+DEAL:1:Square:System info::block(0): 4.000000000
+DEAL:1:Square:System info::block(1): 4.000000000
+DEAL:1:Square:System info::Vector L2 norm
+DEAL:1:Square:System info::block(0): 0.8750000000
+DEAL:1:Square:System info::block(1): 0.8750000000
+DEAL:1:Square:System info::Vector Linfty norm
+DEAL:1:Square:System info::block(0): 0.2500000000
+DEAL:1:Square:System info::block(1): 0.2500000000
+DEAL:1:Square:System info::
+DEAL:1:Square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:1:Square:Matrix TW::Vector:vmult::Reference result norm squared: 0.03291452667
+DEAL:1:Square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.03291452667
+DEAL:1:Square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:1:Square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:1:Square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:1:Square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.03291452667
+DEAL:1:Square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.03291452667
+DEAL:1:Square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:1:Square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.03291452667
+DEAL:1:Square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:1:Square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 0.001493556346
+DEAL:1:Square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 0.001493556346
+DEAL:1:Square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:1:Square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:1:Square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 25 b.size(): 25 r.size(): 25
+DEAL:1:Square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.001493556346
+DEAL:1:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.001493556346
+DEAL:1:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:1:Square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.001493556346
+DEAL:1:Square::Matrix TW::Vector OK
+DEAL:1::
+DEAL:1::
+DEAL:1:Non-square:System info::Matrix frobenius norm
+DEAL:1:Non-square:System info::block(0,0): 0.2962962963
+DEAL:1:Non-square:System info::block(0,1): 0.3194444444
+DEAL:1:Non-square:System info::block(1,0): 0.3194444444
+DEAL:1:Non-square:System info::block(1,1): 0.4444444444
+DEAL:1:Non-square:System info::Vector L1 norm
+DEAL:1:Non-square:System info::block(0): 4.000000000
+DEAL:1:Non-square:System info::block(1): 4.000000000
+DEAL:1:Non-square:System info::Vector L2 norm
+DEAL:1:Non-square:System info::block(0): 0.5416666667
+DEAL:1:Non-square:System info::block(1): 0.8750000000
+DEAL:1:Non-square:System info::Vector Linfty norm
+DEAL:1:Non-square:System info::block(0): 0.1111111111
+DEAL:1:Non-square:System info::block(1): 0.2500000000
+DEAL:1:Non-square:System info::
+DEAL:1:Non-square:Matrix TW::Vector:vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 25
+DEAL:1:Non-square:Matrix TW::Vector:vmult::Reference result norm squared: 0.004314239460
+DEAL:1:Non-square:Matrix TW::Vector:vmult::LinearOperator result norm squared: 0.004314239460
+DEAL:1:Non-square:Matrix TW::Vector:vmult::LinearOperator status: UseTranspose: 0
+DEAL:1:Non-square:Matrix TW::Vector:vmult::LinearOperator payload result norm squared: 0.004314239460
+DEAL:1:Non-square:Matrix TW::Vector:Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 81
+DEAL:1:Non-square:Matrix TW::Vector:Tvmult::Reference result norm squared: 0.01177375699
+DEAL:1:Non-square:Matrix TW::Vector:Tvmult::LinearOperator result norm squared: 0.01177375699
+DEAL:1:Non-square:Matrix TW::Vector:Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:1:Non-square:Matrix TW::Vector:Tvmult::LinearOperator payload result norm squared: 0.01177375699
+DEAL:1:Non-square:Matrix TW::Vector:Composite vmult::System size: A.m(): 25 A.n(): 81 b.size(): 81 r.size(): 81
+DEAL:1:Non-square:Matrix TW::Vector:Composite vmult::Reference result norm squared: 6.726708649e-05
+DEAL:1:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator result norm squared: 6.726708649e-05
+DEAL:1:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator status: UseTranspose: 0
+DEAL:1:Non-square:Matrix TW::Vector:Composite vmult::LinearOperator payload result norm squared: 6.726708649e-05
+DEAL:1:Non-square:Matrix TW::Vector:Composite mult Tvmult::System size: A.m(): 25 A.n(): 81 b.size(): 25 r.size(): 25
+DEAL:1:Non-square:Matrix TW::Vector:Composite mult Tvmult::Reference result norm squared: 0.0001864259953
+DEAL:1:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator result norm squared: 0.0001864259953
+DEAL:1:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator status: UseTranspose: 1
+DEAL:1:Non-square:Matrix TW::Vector:Composite mult Tvmult::LinearOperator payload result norm squared: 0.0001864259953
+DEAL:1:Non-square::Matrix TW::Vector OK
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test the Schur complement for Trilinos vectors.
+// Should give output equivalent to that of schur_complement_02.
+
+#include "../tests.h"
+
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_block_vector.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_solver.h>
+
+#include <deal.II/lac/linear_operator.h>
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+#include <deal.II/lac/schur_complement.h>
+
+#include <sstream>
+#include <cctype>
+#include <algorithm> // for std::remove_if used in the PRINTME macro
+
+// We have to remove the control characters returned by
+// Vector::print in order for deallog to print properly.
+// http://www.cplusplus.com/forum/general/76900/
+#define PRINTME(name, var) \
+ { \
+ std::ostringstream stream; \
+ var.print(stream); \
+ std::string str = stream.str(); \
+    str.resize(std::remove_if(str.begin(), str.end(), [](char x){return std::iscntrl(x);}) - str.begin()); \
+ deallog \
+ << "RHS vector: " << name << ": " \
+ << str << std::endl; \
+ }
+
+using namespace dealii;
+
+
+int main(int argc, char **argv)
+{
+ initlog();
+ deallog.depth_console(0);
+ deallog << std::setprecision(10);
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+ // deal.II SparseMatrix
+ {
+
+ deallog << "Schur complement" << std::endl;
+ deallog.push("SC_SparseMatrix");
+
+ {
+ deallog << "SparseMatrix 1" << std::endl;
+
+      /* MATLAB / GNU Octave code
+
+ clear all;
+ printf("SparseMatrix 1")
+ A = [1];
+ B = [2];
+ C = [3];
+ D = [4];
+ y = [6];
+
+ S = D - C*inv(A)*B
+
+ % vmult
+ g1 = S*y
+ % Tvmult
+ g2 = S'*y
+
+ g = [2];
+
+ % vmult_add
+ g3 = S*y + g
+ % Tvmult_add
+ g4 = S'*y + g
+
+ */
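+
+      // Worked out by hand as a quick sanity check (these values agree
+      // with the expected output recorded for this test):
+      //   S  = D - C*inv(A)*B = 4 - 3*(1/1)*2 = -2
+      //   g1 = S *y           = -2*6          = -12
+      //   g2 = S'*y           = -2*6          = -12
+      //   g3 = S *y + g       = -12 + 2       = -10
+      //   g4 = S'*y + g       = -12 + 2       = -10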
+
+ const unsigned int rc=1;
+ typedef TrilinosWrappers::Vector VectorType;
+
+ TrilinosWrappers::SparseMatrix A (rc,rc,rc);
+ TrilinosWrappers::SparseMatrix B (rc,rc,rc);
+ TrilinosWrappers::SparseMatrix C (rc,rc,rc);
+ TrilinosWrappers::SparseMatrix D (rc,rc,rc);
+
+ VectorType y (rc);
+ VectorType g (rc);
+ for (unsigned int i=0; i < rc; ++i)
+ {
+ A.set(i,i, 1.0*(i+1));
+ B.set(i,i, 2.0*(i+1));
+ C.set(i,i, 3.0*(i+1));
+ D.set(i,i, 4.0*(i+1));
+ y(i) = 6.0*(i+1);
+ g(i) = 2.0*(i+1);
+ }
+ A.compress(VectorOperation::insert);
+ B.compress(VectorOperation::insert);
+ C.compress(VectorOperation::insert);
+ D.compress(VectorOperation::insert);
+
+      // Note that the vector type has to be specified.
+      // Also note that this call to linear_operator actually
+      // resolves to TrilinosWrappers::linear_operator.
+ const auto lo_A = linear_operator<VectorType>(A);
+ const auto lo_B = linear_operator<VectorType>(B);
+ const auto lo_C = linear_operator<VectorType>(C);
+ const auto lo_D = linear_operator<VectorType>(D);
+
+ SolverControl solver_control_A (100, 1.0e-10);
+ TrilinosWrappers::SolverCG solver_A (solver_control_A);
+ TrilinosWrappers::PreconditionJacobi preconditioner_A;
+ preconditioner_A.initialize(A);
+ const auto lo_A_inv = inverse_operator(lo_A,
+ solver_A,
+ preconditioner_A);
+
+ const auto lo_S = schur_complement(lo_A_inv,lo_B,
+ lo_C,lo_D);
+ const auto lo_S_t = transpose_operator(lo_S);
+
+ const VectorType g1 = lo_S*y;
+ const VectorType g2 = lo_S_t *y;
+ const VectorType g3 = lo_S*y + g;
+ const VectorType g4 = lo_S_t *y + g;
+
+ PRINTME("g1",g1);
+ PRINTME("g2",g2);
+ PRINTME("g3",g3);
+ PRINTME("g4",g4);
+ }
+
+ deallog << "SparseMatrix OK" << std::endl;
+ }
+
+}
--- /dev/null
+
+DEAL::Schur complement
+DEAL:SC_SparseMatrix::SparseMatrix 1
+DEAL:SC_SparseMatrix::Convergence step 1 value 0.000000000
+DEAL:SC_SparseMatrix::Convergence step 1 value 0.000000000
+DEAL:SC_SparseMatrix::Convergence step 1 value 0.000000000
+DEAL:SC_SparseMatrix::Convergence step 1 value 0.000000000
+DEAL:SC_SparseMatrix::RHS vector: g1: -1.200e+01
+DEAL:SC_SparseMatrix::RHS vector: g2: -1.200e+01
+DEAL:SC_SparseMatrix::RHS vector: g3: -1.000e+01
+DEAL:SC_SparseMatrix::RHS vector: g4: -1.000e+01
+DEAL:SC_SparseMatrix::SparseMatrix OK
--- /dev/null
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2008 - 2015 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+//
+// Check the operation of the Schur complement for Trilinos vectors,
+// and in parallel.
+// This is a lightly modified version of the test "mpi/periodicity_02.cc".
+// The only difference is the linear solver routine, which now uses the
+// Schur complement operation first tested in "lac/schur_complement_03.cc".
+//
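+//
+// Schematically (a rough outline of the operator-based solve() below,
+// not spelled out in the original test description), the assembled
+// Stokes system has the block form
+//   [ A  B ] [u]   [f]
+//   [ C  M ] [p] = [g]
+// where the pressure mass matrix M is assembled into block(1,1) but only
+// enters the solver as a preconditioner for the Schur complement. The
+// operator-based solver then forms
+//   S  = D0 - C*A^{-1}*B   with D0 a null operator,
+//   g' = g  - C*A^{-1}*f   via condense_schur_rhs,
+// solves S*p = g' iteratively, and recovers the velocity via
+//   u  = A^{-1}*(f - B*p)  (postprocess_schur_solution).
+//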
+
+#define PERIODIC
+
+#include "../tests.h"
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <deal.II/lac/block_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/linear_operator.h>
+#include <deal.II/lac/trilinos_linear_operator.h>
+#include <deal.II/lac/packaged_operation.h>
+#include <deal.II/lac/schur_complement.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_boundary_lib.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+
+namespace Step22
+{
+ using namespace dealii;
+
+ template <int dim>
+ class StokesProblem
+ {
+ public:
+ StokesProblem (const unsigned int degree);
+ void run ();
+
+ private:
+ void setup_dofs ();
+ void assemble_system ();
+ void solve ();
+ void get_point_value (const Point<dim> point, const int proc,
+ Vector<double> &value) const;
+ void check_periodicity(const unsigned int cycle) const;
+ void output_results (const unsigned int refinement_cycle) const;
+ void refine_mesh ();
+
+ const unsigned int degree;
+
+ MPI_Comm mpi_communicator;
+
+ HyperShellBoundary<dim> boundary;
+ parallel::distributed::Triangulation<dim> triangulation;
+ FESystem<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ ConstraintMatrix constraints;
+ std::vector<IndexSet> owned_partitioning;
+ std::vector<IndexSet> relevant_partitioning;
+
+ TrilinosWrappers::BlockSparsityPattern sparsity_pattern;
+ TrilinosWrappers::BlockSparseMatrix system_matrix;
+
+ TrilinosWrappers::MPI::BlockVector solution;
+ TrilinosWrappers::MPI::BlockVector system_rhs;
+
+ ConditionalOStream pcout;
+ };
+
+
+
+ template <int dim>
+ class BoundaryValues : public Function<dim>
+ {
+ public:
+ BoundaryValues () : Function<dim>(dim+1) {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+
+ virtual void vector_value (const Point<dim> &p,
+ Vector<double> &value) const;
+ };
+
+
+ template <int dim>
+ double
+ BoundaryValues<dim>::value (const Point<dim> &p,
+ const unsigned int component) const
+ {
+ Assert (component < this->n_components,
+ ExcIndexRange (component, 0, this->n_components));
+
+ return (1-2*(component==0))*p[(component+1)%2]/(p[0]*p[0]+p[1]*p[1]);
+ }
+
+
+ template <int dim>
+ void
+ BoundaryValues<dim>::vector_value (const Point<dim> &p,
+ Vector<double> &values) const
+ {
+ for (unsigned int c=0; c<this->n_components; ++c)
+ values(c) = BoundaryValues<dim>::value (p, c);
+ }
+
+
+
+ template <int dim>
+ class RightHandSide : public Function<dim>
+ {
+ public:
+ RightHandSide () : Function<dim>(dim+1) {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+
+ virtual void vector_value (const Point<dim> &p,
+ Vector<double> &value) const;
+
+ };
+
+
+ template <int dim>
+ double
+ RightHandSide<dim>::value (const Point<dim> &/*p*/,
+ const unsigned int /*component*/) const
+ {
+ return 0;
+ }
+
+
+ template <int dim>
+ void
+ RightHandSide<dim>::vector_value (const Point<dim> &p,
+ Vector<double> &values) const
+ {
+ for (unsigned int c=0; c<this->n_components; ++c)
+ values(c) = RightHandSide<dim>::value (p, c);
+ }
+
+
+
+
+
+ template <class Matrix, class Preconditioner>
+ class InverseMatrix : public Preconditioner
+ {
+ public:
+ InverseMatrix (const Matrix &m,
+ const Preconditioner &preconditioner,
+ const IndexSet &locally_owned,
+ const MPI_Comm &mpi_communicator);
+
+ void vmult (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const;
+
+ private:
+ const SmartPointer<const Matrix> matrix;
+ const SmartPointer<const Preconditioner> preconditioner;
+
+ const MPI_Comm *mpi_communicator;
+ mutable TrilinosWrappers::MPI::Vector tmp;
+ };
+
+
+ template <class Matrix, class Preconditioner>
+ InverseMatrix<Matrix,Preconditioner>::InverseMatrix
+ (const Matrix &m,
+ const Preconditioner &preconditioner,
+ const IndexSet &locally_owned,
+ const MPI_Comm &mpi_communicator)
+ :
+ matrix (&m),
+ preconditioner (&preconditioner),
+ mpi_communicator (&mpi_communicator),
+ tmp(locally_owned, mpi_communicator)
+ {}
+
+
+
+ template <class Matrix, class Preconditioner>
+ void InverseMatrix<Matrix,Preconditioner>::vmult
+ (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const
+ {
+ SolverControl solver_control (src.size(), 1e-6*src.l2_norm(), false, false);
+ TrilinosWrappers::SolverCG cg (solver_control,
+ TrilinosWrappers::SolverCG::AdditionalData());
+
+ tmp = 0.;
+ cg.solve (*matrix, tmp, src, *preconditioner);
+ dst = tmp;
+ }
+
+
+
+ template <class Preconditioner>
+ class SchurComplement : public TrilinosWrappers::SparseMatrix
+ {
+ public:
+ SchurComplement ( const TrilinosWrappers::BlockSparseMatrix &system_matrix,
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ Preconditioner> &A_inverse,
+ const IndexSet &owned_pres,
+ const IndexSet &relevant_pres,
+ const MPI_Comm &mpi_communicator);
+
+ void vmult (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const;
+
+ private:
+ const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> system_matrix;
+ const SmartPointer<const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ Preconditioner> > A_inverse;
+ mutable TrilinosWrappers::MPI::Vector tmp1, tmp2;
+ };
+
+
+
+ template <class Preconditioner>
+ SchurComplement<Preconditioner>::
+ SchurComplement (const TrilinosWrappers::BlockSparseMatrix &system_matrix,
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ Preconditioner> &A_inverse,
+ const IndexSet &owned_vel,
+ const IndexSet &relevant_vel,
+ const MPI_Comm &mpi_communicator)
+ :
+ system_matrix (&system_matrix),
+ A_inverse (&A_inverse),
+ tmp1 (owned_vel, mpi_communicator),
+ tmp2 (tmp1)
+ {}
+
+
+ template <class Preconditioner>
+ void SchurComplement<Preconditioner>::vmult
+ (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const
+ {
+ system_matrix->block(0,1).vmult (tmp1, src);
+ A_inverse->vmult (tmp2, tmp1);
+ system_matrix->block(1,0).vmult (dst, tmp2);
+ }
+
+
+
+
+ template <int dim>
+ StokesProblem<dim>::StokesProblem (const unsigned int degree)
+ :
+ degree (degree),
+ mpi_communicator (MPI_COMM_WORLD),
+ triangulation (mpi_communicator/*,
+ Triangulation<dim>::maximum_smoothing*/),
+ fe (FE_Q<dim>(degree+1), dim,
+ FE_Q<dim>(degree) , 1),
+ dof_handler (triangulation),
+ pcout (Utilities::MPI::this_mpi_process(mpi_communicator)
+ == 0
+ ?
+ deallog.get_file_stream()
+ :
+ std::cout,
+ (Utilities::MPI::this_mpi_process(mpi_communicator)
+ == 0))
+ {}
+
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::setup_dofs ()
+ {
+ dof_handler.distribute_dofs (fe);
+
+ std::vector<unsigned int> block_component (dim+1,0);
+ block_component[dim] = 1;
+ DoFRenumbering::component_wise (dof_handler, block_component);
+
+ std::vector<types::global_dof_index> dofs_per_block (2);
+ DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, block_component);
+ const unsigned int n_u = dofs_per_block[0],
+ n_p = dofs_per_block[1];
+
+ {
+ owned_partitioning.clear();
+ IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+ owned_partitioning.push_back(locally_owned_dofs.get_view(0, n_u));
+ owned_partitioning.push_back(locally_owned_dofs.get_view(n_u, n_u+n_p));
+
+ relevant_partitioning.clear();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);
+ relevant_partitioning.push_back(locally_relevant_dofs.get_view(0, n_u));
+ relevant_partitioning.push_back(locally_relevant_dofs.get_view(n_u, n_u+n_p));
+
+ constraints.clear ();
+ constraints.reinit(locally_relevant_dofs);
+
+ FEValuesExtractors::Vector velocities(0);
+ FEValuesExtractors::Scalar pressure(dim);
+
+ DoFTools::make_hanging_node_constraints (dof_handler,
+ constraints);
+#ifdef PERIODIC
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 3,
+ BoundaryValues<dim>(),
+ constraints,
+ fe.component_mask(velocities));
+#endif
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ BoundaryValues<dim>(),//ZeroFunction<dim>(dim+1),
+ constraints,
+ fe.component_mask(velocities));
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 1,
+ BoundaryValues<dim>(),//ZeroFunction<dim>(dim+1),
+ constraints,
+ fe.component_mask(velocities));
+
+#ifdef PERIODIC
+ std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator> >
+ periodicity_vector;
+
+ FullMatrix<double> matrix(dim);
+ matrix[0][1]=1.;
+ matrix[1][0]=-1.;
+
+ std::vector<unsigned int> first_vector_components;
+ first_vector_components.push_back(0);
+
+ GridTools::collect_periodic_faces(
+ dof_handler, 2, 3, 1, periodicity_vector, Tensor<1, dim>(), matrix);
+
+ DoFTools::make_periodicity_constraints<DoFHandler<dim> >(
+ periodicity_vector, constraints, fe.component_mask(velocities),
+ first_vector_components);
+#endif
+ }
+
+ constraints.close ();
+
+ {
+ TrilinosWrappers::BlockSparsityPattern bsp
+ (owned_partitioning, owned_partitioning,
+ relevant_partitioning, mpi_communicator);
+
+ DoFTools::make_sparsity_pattern
+ (dof_handler, bsp, constraints, false,
+ Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ bsp.compress();
+
+ system_matrix.reinit (bsp);
+ }
+
+ system_rhs.reinit (owned_partitioning,
+ mpi_communicator);
+ solution.reinit (owned_partitioning, relevant_partitioning,
+ mpi_communicator);
+ }
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::assemble_system ()
+ {
+ system_matrix=0.;
+ system_rhs=0.;
+
+ QGauss<dim> quadrature_formula(degree+2);
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values |
+ update_quadrature_points |
+ update_JxW_values |
+ update_gradients);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> local_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> local_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ const RightHandSide<dim> right_hand_side;
+ std::vector<Vector<double> > rhs_values (n_q_points,
+ Vector<double>(dim+1));
+
+ const FEValuesExtractors::Vector velocities (0);
+ const FEValuesExtractors::Scalar pressure (dim);
+
+ std::vector<SymmetricTensor<2,dim> > symgrad_phi_u (dofs_per_cell);
+ std::vector<double> div_phi_u (dofs_per_cell);
+ std::vector<double> phi_p (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->is_locally_owned())
+ {
+ fe_values.reinit (cell);
+ local_matrix = 0;
+ local_rhs = 0;
+
+ right_hand_side.vector_value_list(fe_values.get_quadrature_points(),
+ rhs_values);
+
+ for (unsigned int q=0; q<n_q_points; ++q)
+ {
+ for (unsigned int k=0; k<dofs_per_cell; ++k)
+ {
+ symgrad_phi_u[k] = fe_values[velocities].symmetric_gradient (k, q);
+ div_phi_u[k] = fe_values[velocities].divergence (k, q);
+ phi_p[k] = fe_values[pressure].value (k, q);
+ }
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<=i; ++j)
+ {
+ local_matrix(i,j) += (symgrad_phi_u[i] * symgrad_phi_u[j]
+ - div_phi_u[i] * phi_p[j]
+ - phi_p[i] * div_phi_u[j]
+ + phi_p[i] * phi_p[j])
+ * fe_values.JxW(q);
+
+ }
+
+ const unsigned int component_i =
+ fe.system_to_component_index(i).first;
+ local_rhs(i) += fe_values.shape_value(i,q) *
+ rhs_values[q](component_i) *
+ fe_values.JxW(q);
+ }
+ }
+
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=i+1; j<dofs_per_cell; ++j)
+ local_matrix(i,j) = local_matrix(j,i);
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global (local_matrix, local_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+
+ system_matrix.compress (VectorOperation::add);
+ system_rhs.compress (VectorOperation::add);
+ }
+
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::solve ()
+ {
+ // Compute a reference solution
+ TrilinosWrappers::MPI::BlockVector solution_reference (owned_partitioning,
+ relevant_partitioning,
+ mpi_communicator);
+ {
+ deallog.push("ManualSchur");
+
+ TrilinosWrappers::PreconditionJacobi A_preconditioner;
+ A_preconditioner.initialize(system_matrix.block(0,0));
+
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ TrilinosWrappers::PreconditionJacobi>
+ A_inverse (system_matrix.block(0,0),
+ A_preconditioner,
+ owned_partitioning[0],
+ mpi_communicator);
+
+ TrilinosWrappers::MPI::BlockVector tmp (owned_partitioning,
+ mpi_communicator);
+
+ {
+ TrilinosWrappers::MPI::Vector schur_rhs (owned_partitioning[1],
+ mpi_communicator);
+ A_inverse.vmult (tmp.block(0), system_rhs.block(0));
+ system_matrix.block(1,0).vmult (schur_rhs, tmp.block(0));
+ schur_rhs -= system_rhs.block(1);
+
+ SchurComplement<TrilinosWrappers::PreconditionJacobi>
+ schur_complement (system_matrix, A_inverse,
+ owned_partitioning[0], relevant_partitioning[0],
+ mpi_communicator);
+
+ SolverControl solver_control (solution_reference.block(1).size(),
+ 1e-6*schur_rhs.l2_norm());
+ // TrilinosWrappers::SolverCG cg (solver_control,
+ // TrilinosWrappers::SolverCG::AdditionalData(true));
+ SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);
+
+ TrilinosWrappers::PreconditionAMG preconditioner;
+ preconditioner.initialize (system_matrix.block(1,1));
+
+ cg.solve (schur_complement,
+ tmp.block(1),
+ schur_rhs,
+ preconditioner);
+
+ constraints.distribute (tmp);
+ solution_reference.block(1)=tmp.block(1);
+ }
+
+ {
+ system_matrix.block(0,1).vmult (tmp.block(0), tmp.block(1));
+ tmp.block(0) *= -1;
+ tmp.block(0) += system_rhs.block(0);
+
+ A_inverse.vmult (tmp.block(0), tmp.block(0));
+
+ constraints.distribute (tmp);
+ solution_reference.block(0)=tmp.block(0);
+ }
+
+ deallog.pop();
+ }
+
+    // Solution using the LinearOperator-based schur_complement operator
+ {
+ deallog.push("SchurComplementOp");
+
+ // Near duplicate of solver routine used in lac/schur_complement_03.cc
+ typedef TrilinosWrappers::MPI::Vector VectorType;
+ typedef TrilinosWrappers::PreconditionJacobi PreconditionerAType;
+ typedef TrilinosWrappers::PreconditionJacobi PreconditionerMType; // Solves in ~14 steps
+ // typedef TrilinosWrappers::PreconditionAMG PreconditionerMType; // Solves in 1 step
+ typedef TrilinosWrappers::SolverCG SolverType;
+
+      // Linear operators
+      // Note that this call to linear_operator actually resolves
+      // to TrilinosWrappers::linear_operator.
+ const auto A = linear_operator<VectorType>(system_matrix.block(0,0));
+ const auto B = linear_operator<VectorType>(system_matrix.block(0,1));
+ const auto C = linear_operator<VectorType>(system_matrix.block(1,0));
+ const auto M = linear_operator<VectorType>(system_matrix.block(1,1)); // Mass matrix stored in this block
+ const auto D0 = null_operator<VectorType>(M);
+
+ // Cheap inverse of A
+ PreconditionerAType preconditioner_A;
+ preconditioner_A.initialize (system_matrix.block(0,0),
+ PreconditionerAType::AdditionalData());
+ ReductionControl solver_control_A (system_matrix.block(0,0).m(),
+ 1e-12, 1e-6);
+ SolverType solver_A (solver_control_A);
+ const auto A_inv = inverse_operator(A, solver_A, preconditioner_A);
+
+ // Preconditioner for mass matrix
+ PreconditionerMType preconditioner_M;
+ preconditioner_M.initialize (system_matrix.block(1,1),
+ PreconditionerMType::AdditionalData());
+
+ // Schur complement
+ const auto S = schur_complement(A_inv,B,C,D0);
+
+ // // Proof of concept for more elaborate preconditioners constructed from
+ // // an inverse operator.
+ // // This particular implementation is rubbish, but it works. It would be
+ // // best to construct a more representative approximation S_inv_approx
+ // // and use that instead.
+ // // Approximate inverse of A
+ // typedef TrilinosWrappers::PreconditionAMG PreconditionerAExpType;
+ // PreconditionerAExpType preconditioner_A_exp;
+ // preconditioner_A_exp.initialize (system_matrix.block(0,0),
+ // PreconditionerAExpType::AdditionalData());
+ // const auto A_inv_approx = linear_operator<VectorType>(system_matrix.block(0,0), preconditioner_A_exp);
+ // // Approximate Schur complement
+ // const auto S_approx = schur_complement<VectorType>(A_inv_approx,B,C,D0);
+ // IterationNumberControl solver_control_S_approx (1, 1e-12); // system_matrix.block(1,1).m()
+ // SolverType solver_S_approx (solver_control_S_approx);
+ // const auto S_inv_approx = inverse_operator(S_approx,solver_S_approx,preconditioner_M);
+
+ // Inverse of Schur complement
+ ReductionControl solver_control_S (system_matrix.block(1,1).m(),
+ 1e-9, 1e-6);
+ SolverType solver_S (solver_control_S);
+ const auto S_inv = inverse_operator(S,solver_S,preconditioner_M);
+ // const auto S_inv = inverse_operator(S,solver_S,S_inv_approx);
+
+ TrilinosWrappers::MPI::BlockVector solution_tmp;
+ solution_tmp.reinit (owned_partitioning,
+ mpi_communicator);
+ VectorType &x = solution_tmp.block(0);
+ VectorType &y = solution_tmp.block(1);
+
+ const VectorType &f = system_rhs.block(0);
+ const VectorType &g = system_rhs.block(1);
+ deallog.push("Pre");
+      // auto rhs = condense_schur_rhs (A_inv,C,f,g); // Would delay computation (returns a PackagedOperation)
+ const VectorType rhs = condense_schur_rhs (A_inv,C,f,g);
+ deallog.pop();
+ deallog.push("Main");
+ y = S_inv * rhs;
+ deallog.pop();
+ deallog.push("Post");
+ x = postprocess_schur_solution (A_inv,B,y,f);
+ deallog.pop();
+
+ constraints.distribute (solution_tmp);
+ solution = solution_tmp;
+
+ deallog.pop();
+ }
+
+ // Check output of reference solution
+ // solution = solution_reference;
+
+    // The two solutions will differ slightly since the solvers are
+    // configured differently, so we compute the relative error between
+    // them and expect it to remain within a reasonable margin.
+    // The same goes for the solution values evaluated at points in
+    // check_periodicity: they will not necessarily coincide exactly
+    // with those of the parent version of this test.
+ TrilinosWrappers::MPI::BlockVector local_soln_ref(owned_partitioning,
+ mpi_communicator);
+ local_soln_ref = solution_reference;
+ const double ref_local_norm = local_soln_ref.l2_norm();
+ const double ref_global_norm = std::sqrt(Utilities::MPI::sum(
+ ref_local_norm * ref_local_norm,
+ mpi_communicator));
+
+ TrilinosWrappers::MPI::BlockVector soln_diff(owned_partitioning,
+ mpi_communicator);
+ soln_diff = solution;
+ soln_diff -= local_soln_ref;
+ const double local_error = soln_diff.l2_norm();
+ const double global_error = std::sqrt(Utilities::MPI::sum(
+ local_error * local_error,
+ mpi_communicator));
+ deallog
+ << "Rel. norm of error in SchurOperator solve: " << global_error/ref_global_norm
+ << std::endl;
+
+ // Check solution
+ AssertThrow(global_error/ref_global_norm < 1e-3,
+ ExcMessage("Solution does not match reference result"));
+ }
+
+ template <int dim>
+ void StokesProblem<dim>::get_point_value
+ (const Point<dim> point, const int proc, Vector<double> &value) const
+ {
+ typename DoFHandler<dim>::active_cell_iterator cell
+ = GridTools::find_active_cell_around_point (dof_handler, point);
+
+ if (cell->is_locally_owned())
+ VectorTools::point_value (dof_handler, solution,
+ point, value);
+
+ std::vector<double> tmp (value.size());
+ for (unsigned int i=0; i<value.size(); ++i)
+ tmp[i]=value[i];
+
+ std::vector<double> tmp2 (value.size());
+ MPI_Reduce(&(tmp[0]), &(tmp2[0]), value.size(), MPI_DOUBLE,
+ MPI_SUM, proc, mpi_communicator);
+
+ for (unsigned int i=0; i<value.size(); ++i)
+ value[i]=tmp2[i];
+ }
+
+ template <int dim>
+ void StokesProblem<dim>::check_periodicity (const unsigned int cycle) const
+ {}
+
+ template <>
+ void StokesProblem<2>::check_periodicity (const unsigned int cycle) const
+ {
+ unsigned int n_points = 4;
+ for (unsigned int i = 0; i<cycle; i++)
+ n_points*=2;
+
+    // Don't test exactly at the support points, since point_value is not stable there.
+ const double eps = 1./(16.*n_points);
+
+ for (unsigned int i=1; i< n_points; i++)
+ {
+ Vector<double> value1(3);
+ Vector<double> value2(3);
+
+ Point<2> point1;
+ point1(0)=0;
+ point1(1)=.5*(1.+1.*i/n_points+eps);
+ Point<2> point2;
+ point2(0)=.5*(1.+1.*i/n_points+eps);
+ point2(1)=0.;
+
+ get_point_value (point1, 0, value1);
+ get_point_value (point2, 0, value2);
+
+ if (Utilities::MPI::this_mpi_process(mpi_communicator)==0)
+ {
+ pcout << point1 << "\t" << value1[0] << "\t" << value1[1] << std::endl;
+ if (std::abs(value2[0]-value1[1])>1e-8)
+ {
+ std::cout<<point1<< "\t" << value1[1] << std::endl;
+ std::cout<<point2<< "\t" << value2[0] << std::endl;
+ Assert(false, ExcInternalError());
+ }
+ if (std::abs(value2[1]+value1[0])>1e-8)
+ {
+ std::cout<<point1<< "\t" << value1[0] << std::endl;
+ std::cout<<point2<< "\t" << value2[1] << std::endl;
+ Assert(false, ExcInternalError());
+ }
+ }
+ }
+ }
+
+
+ template <int dim>
+ void
+ StokesProblem<dim>::output_results (const unsigned int refinement_cycle) const
+ {
+ std::vector<std::string> solution_names (dim, "velocity");
+ solution_names.push_back ("pressure");
+
+ std::vector<DataComponentInterpretation::DataComponentInterpretation>
+ data_component_interpretation
+ (dim, DataComponentInterpretation::component_is_part_of_vector);
+ data_component_interpretation
+ .push_back (DataComponentInterpretation::component_is_scalar);
+
+ DataOut<dim> data_out;
+ data_out.attach_dof_handler (dof_handler);
+ data_out.add_data_vector (solution, solution_names,
+ DataOut<dim>::type_dof_data,
+ data_component_interpretation);
+ Vector<float> subdomain (triangulation.n_active_cells());
+ for (unsigned int i=0; i<subdomain.size(); ++i)
+ subdomain(i) = triangulation.locally_owned_subdomain();
+ data_out.add_data_vector (subdomain, "subdomain");
+ data_out.build_patches (degree+1);
+
+ std::ostringstream filename;
+ filename << "solution-"
+ << Utilities::int_to_string (refinement_cycle, 2)
+ << "."
+ << Utilities::int_to_string (triangulation.locally_owned_subdomain(),2)
+ << ".vtu";
+
+ std::ofstream output (filename.str().c_str());
+ data_out.write_vtu (output);
+
+ if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+ {
+ std::vector<std::string> filenames;
+ for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
+ filenames.push_back (std::string("solution-") +
+ Utilities::int_to_string (refinement_cycle, 2) +
+ "." +
+ Utilities::int_to_string(i, 2) +
+ ".vtu");
+ const std::string
+ pvtu_master_filename = ("solution-" +
+ Utilities::int_to_string (refinement_cycle, 2) +
+ ".pvtu");
+ std::ofstream pvtu_master (pvtu_master_filename.c_str());
+ data_out.write_pvtu_record (pvtu_master, filenames);
+ }
+ }
+
+
+
+ template <int dim>
+ void
+ StokesProblem<dim>::refine_mesh ()
+ {
+
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+
+ FEValuesExtractors::Scalar pressure(dim);
+ KellyErrorEstimator<dim>::estimate (dof_handler,
+ QGauss<dim-1>(degree+1),
+ typename FunctionMap<dim>::type(),
+ solution,
+ estimated_error_per_cell,
+ fe.component_mask(pressure));
+
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.0);
+ triangulation.execute_coarsening_and_refinement ();
+ }
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::run ()
+ {
+ Point<dim> center;
+ const double inner_radius = .5;
+ const double outer_radius = 1.;
+
+ GridGenerator::quarter_hyper_shell (triangulation,
+ center,
+ inner_radius,
+ outer_radius,
+ 0,
+ true);
+
+#ifdef PERIODIC
+ std::vector<GridTools::PeriodicFacePair<typename parallel::distributed::Triangulation<dim>::cell_iterator> >
+ periodicity_vector;
+
+ FullMatrix<double> matrix(dim);
+ matrix[0][1]=1.;
+ matrix[1][0]=-1.;
+
+ std::vector<unsigned int> first_vector_components;
+ first_vector_components.push_back(0);
+
+ GridTools::collect_periodic_faces(
+ triangulation, 2, 3, 1, periodicity_vector, Tensor<1, dim>(), matrix);
+
+ triangulation.add_periodicity(periodicity_vector);
+#endif
+
+
+ triangulation.set_boundary(0, boundary);
+ triangulation.set_boundary(1, boundary);
+
+ triangulation.refine_global (4-dim);
+
+ for (unsigned int refinement_cycle = 0; refinement_cycle<3;
+ ++refinement_cycle)
+ {
+ pcout << "Refinement cycle " << refinement_cycle << std::endl;
+
+ if (refinement_cycle > 0)
+ refine_mesh ();
+
+ setup_dofs ();
+
+ pcout << " Assembling..." << std::endl << std::flush;
+ assemble_system ();
+
+ pcout << " Solving..." << std::endl << std::flush;
+ solve ();
+
+#ifdef PERIODIC
+ check_periodicity(refinement_cycle);
+#endif
+ // output_results (refinement_cycle);
+
+ pcout << std::endl;
+ }
+ }
+}
+
+
+
+int main (int argc, char *argv[])
+{
+ try
+ {
+ using namespace dealii;
+ using namespace Step22;
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD)==0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile, false);
+ deallog.threshold_double(1.e-10);
+ {
+ StokesProblem<2> flow_problem(1);
+ flow_problem.run ();
+ }
+ }
+ else
+ {
+ StokesProblem<2> flow_problem(1);
+ flow_problem.run ();
+ }
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+Refinement cycle 0
+ Assembling...
+ Solving...
+DEAL:ManualSchur:cg::Starting value 7.49608e-06
+DEAL:ManualSchur:cg::Convergence step 11 value 0
+DEAL:SchurComplementOp:Pre::Convergence step 74 value 0
+DEAL:SchurComplementOp:Main::Convergence step 12 value 3.95620e-10
+DEAL:SchurComplementOp:Post::Convergence step 74 value 0
+DEAL::Rel. norm of error in SchurOperator solve: 0.000618353
+0.00000 0.632812 -1.58049 0.00000
+0.00000 0.757812 -1.31971 0.00000
+0.00000 0.882812 -1.13281 0.00000
+
+Refinement cycle 1
+ Assembling...
+ Solving...
+DEAL:ManualSchur:cg::Starting value 5.38429e-06
+DEAL:ManualSchur:cg::Convergence step 11 value 0
+DEAL:SchurComplementOp:Pre::Convergence step 104 value 0
+DEAL:SchurComplementOp:Main::Convergence step 11 value 7.22417e-10
+DEAL:SchurComplementOp:Post::Convergence step 104 value 0
+DEAL::Rel. norm of error in SchurOperator solve: 0.000297273
+0.00000 0.566406 -1.76557 0.00000
+0.00000 0.628906 -1.59010 0.00000
+0.00000 0.691406 -1.44635 0.00000
+0.00000 0.753906 -1.32649 0.00000
+0.00000 0.816406 -1.22485 0.00000
+0.00000 0.878906 -1.13782 0.00000
+0.00000 0.941406 -1.06222 0.00000
+
+Refinement cycle 2
+ Assembling...
+ Solving...
+DEAL:ManualSchur:cg::Starting value 6.10232e-06
+DEAL:ManualSchur:cg::Convergence step 12 value 0
+DEAL:SchurComplementOp:Pre::Convergence step 153 value 0
+DEAL:SchurComplementOp:Main::Convergence step 12 value 6.47021e-10
+DEAL:SchurComplementOp:Post::Convergence step 153 value 0
+DEAL::Rel. norm of error in SchurOperator solve: 0.000405072
+0.00000 0.533203 -1.87543 0.00000
+0.00000 0.564453 -1.77163 0.00000
+0.00000 0.595703 -1.67869 0.00000
+0.00000 0.626953 -1.59502 0.00000
+0.00000 0.658203 -1.51929 0.00000
+0.00000 0.689453 -1.45043 0.00000
+0.00000 0.720703 -1.38754 0.00000
+0.00000 0.751953 -1.32988 0.00000
+0.00000 0.783203 -1.27680 0.00000
+0.00000 0.814453 -1.22782 0.00000
+0.00000 0.845703 -1.18244 0.00000
+0.00000 0.876953 -1.14032 0.00000
+0.00000 0.908203 -1.10107 0.00000
+0.00000 0.939453 -1.06445 0.00000
+0.00000 0.970703 -1.03018 0.00000
+
--- /dev/null
+Refinement cycle 0
+ Assembling...
+ Solving...
+DEAL:ManualSchur:cg::Starting value 7.49608e-06
+DEAL:ManualSchur:cg::Convergence step 11 value 0
+DEAL:SchurComplementOp:Pre::Convergence step 74 value 0
+DEAL:SchurComplementOp:Main::Convergence step 12 value 3.95620e-10
+DEAL:SchurComplementOp:Post::Convergence step 74 value 0
+DEAL::Rel. norm of error in SchurOperator solve: 0.000618353
+0.00000 0.632812 -1.58049 0.00000
+0.00000 0.757812 -1.31971 0.00000
+0.00000 0.882812 -1.13281 0.00000
+
+Refinement cycle 1
+ Assembling...
+ Solving...
+DEAL:ManualSchur:cg::Starting value 5.38732e-06
+DEAL:ManualSchur:cg::Convergence step 11 value 0
+DEAL:SchurComplementOp:Pre::Convergence step 104 value 0
+DEAL:SchurComplementOp:Main::Convergence step 11 value 7.22417e-10
+DEAL:SchurComplementOp:Post::Convergence step 104 value 0
+DEAL::Rel. norm of error in SchurOperator solve: 0.000297050
+0.00000 0.566406 -1.76557 0.00000
+0.00000 0.628906 -1.59010 0.00000
+0.00000 0.691406 -1.44635 0.00000
+0.00000 0.753906 -1.32649 0.00000
+0.00000 0.816406 -1.22485 0.00000
+0.00000 0.878906 -1.13782 0.00000
+0.00000 0.941406 -1.06222 0.00000
+
+Refinement cycle 2
+ Assembling...
+ Solving...
+DEAL:ManualSchur:cg::Starting value 6.10415e-06
+DEAL:ManualSchur:cg::Convergence step 12 value 0
+DEAL:SchurComplementOp:Pre::Convergence step 153 value 0
+DEAL:SchurComplementOp:Main::Convergence step 12 value 6.47021e-10
+DEAL:SchurComplementOp:Post::Convergence step 153 value 0
+DEAL::Rel. norm of error in SchurOperator solve: 0.000404964
+0.00000 0.533203 -1.87543 0.00000
+0.00000 0.564453 -1.77163 0.00000
+0.00000 0.595703 -1.67869 0.00000
+0.00000 0.626953 -1.59502 0.00000
+0.00000 0.658203 -1.51929 0.00000
+0.00000 0.689453 -1.45043 0.00000
+0.00000 0.720703 -1.38754 0.00000
+0.00000 0.751953 -1.32988 0.00000
+0.00000 0.783203 -1.27680 0.00000
+0.00000 0.814453 -1.22782 0.00000
+0.00000 0.845703 -1.18244 0.00000
+0.00000 0.876953 -1.14032 0.00000
+0.00000 0.908203 -1.10107 0.00000
+0.00000 0.939453 -1.06445 0.00000
+0.00000 0.970703 -1.03018 0.00000
+