--- /dev/null
+//---------------------------------------------------------------------------
+// $Id: petsc_matrix_free.h 26043 2012-09-24 19:25:57Z steigemann $
+// Version: $Name$
+//
+// Copyright (C) 2012 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#ifndef __deal2__petsc_matrix_free_h
+#define __deal2__petsc_matrix_free_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_USE_PETSC
+
+# include <deal.II/lac/exceptions.h>
+# include <deal.II/lac/petsc_matrix_base.h>
+# include <deal.II/lac/petsc_vector.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+namespace PETScWrappers
+{
+/**
+ * Implementation of a parallel matrix class based on the PETSc <tt>MatShell</tt> matrix type.
+ * This base class implements only the interface to the PETSc matrix object,
+ * while all the functionality is contained in the matrix-vector
+ * multiplication, which must be reimplemented in derived classes.
+ *
+ * This interface complements the dealii::MatrixFree class and makes it possible
+ * to use user-defined matrix classes together with PETSc solvers and functionality.
+ * See also the documentation of the dealii::MatrixFree class and of step-37 and step-48.
+ *
+ * Similar to the other @p PETScWrappers matrix classes, the MatrixFree class
+ * provides the usual matrix-vector multiplication
+ * <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>,
+ * which is pure virtual and must be reimplemented in derived classes.
+ * Besides the usual interface, this class has a matrix-vector multiplication
+ * <tt>vmult(Vec &dst, const Vec &src)</tt>
+ * taking PETSc Vec objects, which is called by
+ * <tt>matrix_free_mult(Mat A, Vec src, Vec dst)</tt>,
+ * the function registered as the matrix-vector multiplication of this PETSc
+ * matrix object. The default implementation of this vmult function in the base
+ * class translates the given PETSc Vec vectors into deal.II vectors, calls
+ * the usual vmult function with the usual interface, and converts
+ * the result back to a PETSc Vec. This could be made much more efficient
+ * in derived classes without allocating new memory.
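+ *
+ * To use this class, derive from it and implement the four pure virtual
+ * matrix-vector products. The following is a minimal sketch in which the
+ * derived class (here arbitrarily called <tt>IdentityOperator</tt>) simply
+ * acts as the identity; a real application would apply its own operator
+ * in these functions:
+ * @code
+ *   class IdentityOperator : public PETScWrappers::MatrixFree
+ *   {
+ *     public:
+ *       virtual void vmult (PETScWrappers::VectorBase       &dst,
+ *                           const PETScWrappers::VectorBase &src) const
+ *       {
+ *         // dst = M*src; here M is simply the identity
+ *         dst.equ (1., src);
+ *       }
+ *
+ *       virtual void Tvmult (PETScWrappers::VectorBase       &dst,
+ *                            const PETScWrappers::VectorBase &src) const
+ *       {
+ *         // the identity is symmetric
+ *         vmult (dst, src);
+ *       }
+ *
+ *       virtual void vmult_add (PETScWrappers::VectorBase       &dst,
+ *                               const PETScWrappers::VectorBase &src) const
+ *       {
+ *         // dst += M*src
+ *         dst.add (1., src);
+ *       }
+ *
+ *       virtual void Tvmult_add (PETScWrappers::VectorBase       &dst,
+ *                                const PETScWrappers::VectorBase &src) const
+ *       {
+ *         vmult_add (dst, src);
+ *       }
+ *   };
+ * @endcode
+ * An object of such a derived class can then be handed to the PETSc solver
+ * classes like any other PETScWrappers matrix.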
+ *
+ * @ingroup PETScWrappers
+ * @ingroup Matrix1
+ * @author Wolfgang Bangerth, Martin Steigemann, 2012
+ */
+ class MatrixFree : public MatrixBase
+ {
+ public:
+
+ /**
+ * Default constructor. Create an
+ * empty matrix object.
+ */
+ MatrixFree ();
+
+ /**
+ * Create a matrix object of
+ * dimensions @p m times @p n
+ * with communication happening
+ * over the provided @p communicator.
+ *
+ * For the meaning of the @p local_rows
+ * and @p local_columns parameters,
+ * see the PETScWrappers::MPI::SparseMatrix
+ * class documentation.
+ *
+ * Like other PETSc matrices, the
+ * matrix-free object needs to have
+ * a size and, in order to perform
+ * matrix-vector multiplications
+ * efficiently in parallel, also needs
+ * to know @p local_rows
+ * and @p local_columns. But in contrast
+ * to the PETScWrappers::SparseMatrix
+ * classes, a PETSc matrix-free object
+ * does not need an estimate of the number
+ * of non-zero entries and has no option
+ * <tt>is_symmetric</tt>.
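+ *
+ * A minimal sketch, assuming a derived
+ * class <tt>MyOperator</tt> that implements
+ * the pure virtual functions, a square
+ * matrix of size @p m, and that @p m is
+ * evenly divisible among @p n_processes
+ * MPI processes:
+ * @code
+ *   const unsigned int local_rows = m / n_processes;
+ *   MyOperator matrix (communicator, m, m,
+ *                      local_rows, local_rows);
+ * @endcode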
+ */
+ MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Create a matrix object of
+ * dimensions @p m times @p n
+ * with communication happening
+ * over the provided @p communicator.
+ *
+ * Like other PETSc matrices, the
+ * matrix-free object needs to have
+ * a size and, in order to perform
+ * matrix-vector multiplications
+ * efficiently in parallel, also needs
+ * to know the local sizes given by
+ * @p local_rows_per_process and
+ * @p local_columns_per_process for
+ * @p this_process. But in contrast
+ * to the PETScWrappers::SparseMatrix
+ * classes, a PETSc matrix-free object
+ * does not need an estimate of the number
+ * of non-zero entries and has no option
+ * <tt>is_symmetric</tt>.
+ */
+ MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Constructor for the serial case:
+ * Same as the corresponding constructor
+ * with an MPI communicator argument, see above,
+ * but with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Constructor for the serial case:
+ * Same as the corresponding constructor
+ * with an MPI communicator argument, see above,
+ * but with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Throw away the present matrix and
+ * generate one that has the same
+ * properties as if it were created by
+ * the constructor of this class with
+ * the same argument list as the
+ * present function.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Throw away the present matrix and
+ * generate one that has the same
+ * properties as if it were created by
+ * the constructor of this class with
+ * the same argument list as the
+ * present function.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Calls the @p reinit() function
+ * above with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Calls the @p reinit() function
+ * above with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this matrix.
+ */
+ const MPI_Comm & get_mpi_communicator () const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix, then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Matrix-vector multiplication: let
+ * <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this matrix. This
+ * function does the same as @p vmult()
+ * but takes the transposed matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> to <i>dst</i>,
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void vmult_add (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as @p vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void Tvmult_add (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * The matrix-vector multiplication
+ * called by @p matrix_free_mult().
+ * This function can be reimplemented
+ * in derived classes for efficiency. The default
+ * implementation copies the given vectors
+ * into PETScWrappers::*::Vector
+ * and calls <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>,
+ * which is pure virtual and must be reimplemented
+ * in derived classes.
+ */
+ virtual
+ void vmult (Vec &dst, const Vec &src) const;
+
+ private:
+
+ /**
+ * Copy of the communicator object to
+ * be used for this parallel matrix-free object.
+ */
+ MPI_Comm communicator;
+
+ /**
+ * Callback function registered
+ * as the matrix-vector multiplication
+ * of this matrix-free object,
+ * called by PETSc routines.
+ * This function must be static and
+ * takes a PETSc matrix @p A
+ * and vectors @p src and @p dst,
+ * where <i>dst = A*src</i>.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * This function calls
+ * <tt>vmult(Vec &dst, const Vec &src)</tt>
+ * which should be reimplemented in
+ * derived classes.
+ */
+ static int matrix_free_mult (Mat A, Vec src, Vec dst);
+
+ /**
+ * Do the actual work for the
+ * respective @p reinit() function and
+ * the matching constructor,
+ * i.e. create a matrix object. Getting rid
+ * of the previous matrix is left to
+ * the caller.
+ */
+ void do_reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+ };
+
+
+
+// -------- template and inline functions ----------
+
+ inline
+ const MPI_Comm &
+ MatrixFree::get_mpi_communicator () const
+ {
+ return communicator;
+ }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_USE_PETSC
+
+
+/*---------------------------- petsc_matrix_free.h ---------------------------*/
+
+#endif
+/*---------------------------- petsc_matrix_free.h ---------------------------*/
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id: petsc_matrix_free.cc 26043 2012-09-24 19:25:57Z steigemann $
+// Version: $Name$
+//
+// Copyright (C) 2012 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+#include <deal.II/lac/petsc_matrix_free.h>
+
+#include <cstring>
+
+#ifdef DEAL_II_USE_PETSC
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace PETScWrappers
+{
+ MatrixFree::MatrixFree ()
+ : communicator (PETSC_COMM_SELF)
+ {
+ const int m=0;
+ do_reinit (m, m, m, m);
+ }
+
+
+
+ MatrixFree::MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns)
+ : communicator (communicator)
+ {
+ do_reinit (m, n, local_rows, local_columns);
+ }
+
+
+
+ MatrixFree::MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process)
+ : communicator (communicator)
+ {
+ Assert (local_rows_per_process.size() == local_columns_per_process.size(),
+ ExcDimensionMismatch (local_rows_per_process.size(),
+ local_columns_per_process.size()));
+ Assert (this_process < local_rows_per_process.size(),
+ ExcInternalError());
+
+ do_reinit (m, n,
+ local_rows_per_process[this_process],
+ local_columns_per_process[this_process]);
+ }
+
+
+
+ MatrixFree::MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns)
+ : communicator (MPI_COMM_WORLD)
+ {
+ do_reinit (m, n, local_rows, local_columns);
+ }
+
+
+
+ MatrixFree::MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process)
+ : communicator (MPI_COMM_WORLD)
+ {
+ Assert (local_rows_per_process.size() == local_columns_per_process.size(),
+ ExcDimensionMismatch (local_rows_per_process.size(),
+ local_columns_per_process.size()));
+ Assert (this_process < local_rows_per_process.size(),
+ ExcInternalError());
+
+ do_reinit (m, n,
+ local_rows_per_process[this_process],
+ local_columns_per_process[this_process]);
+ }
+
+
+
+ void MatrixFree::reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns)
+ {
+ this->communicator = communicator;
+
+ // destroy the matrix and
+ // generate a new one
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+ int ierr = MatDestroy (matrix);
+#else
+ int ierr = MatDestroy (&matrix);
+#endif
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ do_reinit (m, n, local_rows, local_columns);
+ }
+
+
+
+ void MatrixFree::reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process)
+ {
+ Assert (local_rows_per_process.size() == local_columns_per_process.size(),
+ ExcDimensionMismatch (local_rows_per_process.size(),
+ local_columns_per_process.size()));
+ Assert (this_process < local_rows_per_process.size(),
+ ExcInternalError());
+
+ this->communicator = communicator;
+
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+ int ierr = MatDestroy (matrix);
+#else
+ int ierr = MatDestroy (&matrix);
+#endif
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ do_reinit (m, n,
+ local_rows_per_process[this_process],
+ local_columns_per_process[this_process]);
+ }
+
+
+
+ void MatrixFree::reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns)
+ {
+ reinit (MPI_COMM_WORLD, m, n, local_rows, local_columns);
+ }
+
+
+
+ void MatrixFree::reinit (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process)
+ {
+ reinit (MPI_COMM_WORLD, m, n, local_rows_per_process, local_columns_per_process, this_process);
+ }
+
+
+
+ void MatrixFree::clear ()
+ {
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+ int ierr = MatDestroy (matrix);
+#else
+ int ierr = MatDestroy (&matrix);
+#endif
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ const int m=0;
+ do_reinit (m, m, m, m);
+ }
+
+
+
+ void MatrixFree::vmult (Vec &dst, const Vec &src) const
+ {
+
+//TODO: Translate the given PETSc Vec* vector into a deal.II
+// vector so we can call the vmult function with the usual
+// interface; then convert back. This could be much more
+// efficient, if the PETScWrappers::*::Vector classes
+// had a way to simply generate such a vector object from
+// a given PETSc Vec* object without allocating new memory
+// and without taking ownership of the Vec*
+
+ VectorBase *x = 0;
+ VectorBase *y = 0;
+ // because we do not know whether
+ // dst and src are sequential or
+ // distributed vectors, we ask for
+ // the vector type and allocate
+ // x and y as the matching
+ // dealii::PETScWrappers::(MPI::)Vector:
+ const char *vec_type;
+ int ierr = VecGetType (src, &vec_type);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscInt local_size;
+ ierr = VecGetLocalSize (src, &local_size);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ if (strcmp(vec_type,"mpi") == 0)
+ {
+ PetscInt size;
+ ierr = VecGetSize (src, &size);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ x = new PETScWrappers::MPI::Vector (this->get_mpi_communicator (), size, local_size);
+ y = new PETScWrappers::MPI::Vector (this->get_mpi_communicator (), size, local_size);
+ }
+ else if (strcmp(vec_type,"seq") == 0)
+ {
+ x = new PETScWrappers::Vector (local_size);
+ y = new PETScWrappers::Vector (local_size);
+ }
+ else
+ AssertThrow (false, ExcMessage("PETScWrappers::MatrixFree::vmult: "
+ "This only works for PETSc vectors of type VECMPI or VECSEQ."));
+
+ // copy src to x
+ x->equ(1., PETScWrappers::VectorBase(src));
+ // and call vmult(x,y) which must
+ // be reimplemented in derived classes
+ vmult (*y, *x);
+
+ y->compress();
+ // copy the result back to dst
+ ierr = VecCopy (&(*(*y)), dst);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ delete (x);
+ delete (y);
+ }
+
+
+
+ int MatrixFree::matrix_free_mult (Mat A, Vec src, Vec dst)
+ {
+ // retrieve the MatrixFree object that
+ // was stored as the shell matrix context
+ // of the given matrix A when it was
+ // created in do_reinit()
+ MatrixFree *this_object;
+ int ierr = MatShellGetContext (A, &this_object);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ // call vmult of this object:
+ this_object->vmult (dst, src);
+
+ return (0);
+ }
+
+
+
+ void MatrixFree::do_reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns)
+ {
+ Assert (local_rows <= m, ExcDimensionMismatch (local_rows, m));
+ Assert (local_columns <= n, ExcDimensionMismatch (local_columns, n));
+
+ int ierr;
+ // create a PETSc MatShell matrix-type
+ // object of dimension m x n and local size
+ // local_rows x local_columns
+ ierr = MatCreateShell(communicator, local_rows, local_columns, m, n, (void*)this, &matrix);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+ // register the MatrixFree::matrix_free_mult function
+ // as the matrix multiplication used by this matrix
+ ierr = MatShellSetOperation (matrix, MATOP_MULT,
+ (void(*)(void))&dealii::PETScWrappers::MatrixFree::matrix_free_mult);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = MatSetFromOptions (matrix);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+ }
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_USE_PETSC