* This class implements an algebraic multigrid (AMG) preconditioner
* based on the Trilinos ML implementation. What this class does is
 * twofold. When the initialize() function is invoked, an ML
- * preconditioner object is created based on the matrix
- * that we want the preconditioner to be based on. A call of the
- * respective <code>vmult</code> function does call the respective
- * operation in the Trilinos package, where it is called
  + * preconditioner object is created for the matrix that the
  + * preconditioner is supposed to be based on. Calling the respective
  + * <code>vmult</code> function invokes the corresponding operation in
  + * the Trilinos package, where it is called
* <code>ApplyInverse</code>. Use of this class is explained in the
* @ref step_31 "step-31" tutorial program.
*
* fact that some of the entries in the preconditioner matrix are zero
* and hence can be neglected.
*
- * The implementation is able to distinguish between matrices from
- * elliptic problems and convection dominated problems. We use the standard
- * options provided by Trilinos ML for elliptic problems, except that we use a
- * Chebyshev smoother instead of a symmetric Gauss-Seidel smoother.
- * For most elliptic problems, Chebyshev is better than Gauss-Seidel (SSOR).
+ * The implementation is able to distinguish between matrices from
  + * elliptic problems and convection-dominated problems. We use the
+ * standard options provided by Trilinos ML for elliptic problems,
+ * except that we use a Chebyshev smoother instead of a symmetric
+ * Gauss-Seidel smoother. For most elliptic problems, Chebyshev is
+ * better than Gauss-Seidel (SSOR).
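  + *
  + * A minimal usage sketch (the preconditioner type name and the matrix
  + * and vector names below are illustrative and not taken from this
  + * header):
  + * @code
  + * TrilinosWrappers::PreconditionAMG preconditioner;
  + * preconditioner.initialize (system_matrix);   // builds the ML hierarchy
  + * preconditioner.vmult (dst, src);             // calls ML's ApplyInverse
  + * @endcode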
*
* @ingroup TrilinosWrappers
* @ingroup Preconditioners
* multilevel hierarchy for the
* solution of a linear system
* with the given matrix. The
- * function uses the matrix
- * format specified in
- * TrilinosWrappers::SparseMatrix.
+ * function uses the matrix
+ * format specified in
+ * TrilinosWrappers::SparseMatrix.
*/
void initialize (const SparseMatrix &matrix,
const bool elliptic = true,
* multilevel hierarchy for the
* solution of a linear system
* with the given matrix. This
- * function takes a deal.ii matrix
- * and copies the content into a
- * Trilinos matrix, so the function
- * can be considered rather
- * inefficient.
  + * function takes a deal.II
+ * matrix and copies the
+ * content into a Trilinos
+ * matrix, so the function can
+ * be considered rather
+ * inefficient.
*/
void initialize (const ::dealii::SparseMatrix<double> &deal_ii_sparse_matrix,
const bool elliptic = true,
const bool output_details = false);
/**
- * This function can be used
- * for a faster recalculation of
- * the preconditioner construction
- * when the matrix entries
- * underlying the preconditioner
- * have changed,
- * but the matrix sparsity pattern
- * has remained the same. What this
- * function does is to take the
- * already generated coarsening
- * structure, compute the AMG
- * prolongation and restriction
- * according to a smoothed aggregation
- * strategy and then builds the whole
- * multilevel hiearchy. This function
- * can be considerably faster in that
- * case, since the coarsening pattern
- * is usually the most difficult thing
- * to do when setting up the
- * AMG ML preconditioner.
+ * This function can be used
+ * for a faster recalculation
+ * of the preconditioner
+ * construction when the matrix
+ * entries underlying the
+ * preconditioner have changed,
+ * but the matrix sparsity
+ * pattern has remained the
+ * same. What this function
+ * does is to take the already
+ * generated coarsening
+ * structure, compute the AMG
+ * prolongation and restriction
+ * according to a smoothed
+ * aggregation strategy and
  + * then build the whole
  + * multilevel hierarchy. This
+ * function can be considerably
+ * faster in that case, since
+ * the coarsening pattern is
+ * usually the most difficult
+ * thing to do when setting up
+ * the AMG ML preconditioner.
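  + *
  + * A rough sketch of the intended call sequence (matrix and
  + * preconditioner names are illustrative only):
  + * @code
  + * preconditioner.initialize (system_matrix);  // full setup once
  + * // ... entries of system_matrix change, sparsity pattern stays fixed
  + * preconditioner.reinit ();                   // reuse the coarsening
  + * @endcode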
*/
void reinit ();
*/
boost::shared_ptr<ML_Epetra::MultiLevelPreconditioner> multigrid_operator;
+ /**
+ * Internal communication
+ * pattern in case the matrix
+ * needs to be copied from
+ * deal.II format.
+ */
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
Epetra_MpiComm communicator;
#else
Epetra_SerialComm communicator;
#endif
+
+ /**
+ * Internal Trilinos map in
+ * case the matrix needs to be
+ * copied from deal.II format.
+ */
boost::shared_ptr<Epetra_Map> Map;
/**
namespace MatrixIterators
{
/**
- * STL conforming iterator. This class acts as an iterator walking over the
- * elements of Trilinos matrices. The implementation of this class is similar
- * to the one for PETSc matrices.
+ * STL conforming iterator. This class acts as an iterator walking
+ * over the elements of Trilinos matrices. The implementation of this
+ * class is similar to the one for PETSc matrices.
*
- * Note that Trilinos does not give any guarantees as to the order of elements
- * within each row. Note also that accessing the elements of a full matrix
- * surprisingly only shows the nonzero elements of the matrix, not all
- * elements.
+ * Note that Trilinos does not give any guarantees as to the order of
+ * elements within each row. Note also that accessing the elements of
+ * a full matrix surprisingly only shows the nonzero elements of the
+ * matrix, not all elements.
*
* @ingroup TrilinosWrappers
* @author Martin Kronbichler, Wolfgang Bangerth, 2008
class const_iterator
{
private:
- /**
- * Accessor class for iterators
- */
+ /**
+ * Accessor class for iterators
+ */
class Accessor
{
public:
- /**
- * Constructor. Since we use
- * accessors only for read
- * access, a const matrix
- * pointer is sufficient.
- */
+ /**
+ * Constructor. Since we use
+ * accessors only for read
+ * access, a const matrix
+ * pointer is sufficient.
+ */
Accessor (const SparseMatrix *matrix,
const unsigned int row,
const unsigned int index);
- /**
- * Row number of the element
- * represented by this
- * object.
- */
+ /**
+ * Row number of the element
+ * represented by this object.
+ */
unsigned int row() const;
- /**
- * Index in row of the element
- * represented by this
- * object.
- */
+ /**
+ * Index in row of the element
+ * represented by this object.
+ */
unsigned int index() const;
- /**
- * Column number of the
- * element represented by
- * this object.
- */
+ /**
+ * Column number of the element
+ * represented by this object.
+ */
unsigned int column() const;
- /**
- * Value of this matrix entry.
- */
+ /**
+ * Value of this matrix entry.
+ */
TrilinosScalar value() const;
- /**
- * Exception
- */
+ /**
+ * Exception
+ */
DeclException0 (ExcBeyondEndOfMatrix);
- /**
- * Exception
- */
+
+ /**
+ * Exception
+ */
DeclException3 (ExcAccessToNonlocalRow,
int, int, int,
<< "You tried to access row " << arg1
<< " are stored locally and can be accessed.");
private:
- /**
- * The matrix accessed.
- */
+ /**
+ * The matrix accessed.
+ */
mutable SparseMatrix *matrix;
- /**
- * Current row number.
- */
+ /**
+ * Current row number.
+ */
unsigned int a_row;
- /**
- * Current index in row.
- */
+ /**
+ * Current index in row.
+ */
unsigned int a_index;
- /**
- * Cache where we store the
- * column indices of the present
- * row. This is necessary, since
- * Trilinos makes access to the
- * elements of its matrices
- * rather hard, and it is much
- * more efficient to copy all
- * column entries of a row once
- * when we enter it than
- * repeatedly asking Trilinos for
- * individual ones. This also
- * makes some sense since it is
- * likely that we will access
- * them sequentially anyway.
- *
- * In order to make copying of
- * iterators/accessor of
- * acceptable performance, we
- * keep a shared pointer to these
- * entries so that more than one
- * accessor can access this data
- * if necessary.
- */
+ /**
+ * Cache where we store the
+ * column indices of the
+ * present row. This is
+ * necessary, since Trilinos
+ * makes access to the elements
+ * of its matrices rather hard,
+ * and it is much more
+ * efficient to copy all column
+ * entries of a row once when
+ * we enter it than repeatedly
+ * asking Trilinos for
+ * individual ones. This also
+ * makes some sense since it is
+ * likely that we will access
+ * them sequentially anyway.
+ *
  + * In order to make copying of
  + * iterators/accessors
  + * acceptably fast, we
+ * keep a shared pointer to
+ * these entries so that more
+ * than one accessor can access
+ * this data if necessary.
+ */
boost::shared_ptr<const std::vector<unsigned int> > colnum_cache;
- /**
- * Similar cache for the values
- * of this row.
- */
+ /**
+ * Similar cache for the values
+ * of this row.
+ */
boost::shared_ptr<const std::vector<TrilinosScalar> > value_cache;
- /**
- * Discard the old row caches
- * (they may still be used by
- * other accessors) and generate
- * new ones for the row pointed
- * to presently by this accessor.
- */
+ /**
+ * Discard the old row caches
+ * (they may still be used by
+ * other accessors) and
+ * generate new ones for the
+ * row pointed to presently by
+ * this accessor.
+ */
void visit_present_row ();
- /**
- * Make enclosing class a
- * friend.
- */
+ /**
+ * Make enclosing class a
+ * friend.
+ */
friend class const_iterator;
};
public:
- /**
- * Constructor. Create an iterator
- * into the matrix @p matrix for the
- * given row and the index within it.
- */
+ /**
+ * Constructor. Create an
+ * iterator into the matrix @p
+ * matrix for the given row and
+ * the index within it.
+ */
const_iterator (const SparseMatrix *matrix,
const unsigned int row,
const unsigned int index);
- /**
- * Prefix increment.
- */
+ /**
+ * Prefix increment.
+ */
const_iterator& operator++ ();
- /**
- * Postfix increment.
- */
+ /**
+ * Postfix increment.
+ */
const_iterator operator++ (int);
- /**
- * Dereferencing operator.
- */
+ /**
+ * Dereferencing operator.
+ */
const Accessor& operator* () const;
- /**
- * Dereferencing operator.
- */
+ /**
+ * Dereferencing operator.
+ */
const Accessor* operator-> () const;
- /**
- * Comparison. True, if
- * both iterators point to
- * the same matrix
- * position.
- */
+ /**
+ * Comparison. True, if both
+ * iterators point to the same
+ * matrix position.
+ */
bool operator == (const const_iterator&) const;
- /**
- * Inverse of <tt>==</tt>.
- */
+
+ /**
+ * Inverse of <tt>==</tt>.
+ */
bool operator != (const const_iterator&) const;
- /**
- * Comparison
- * operator. Result is true
- * if either the first row
- * number is smaller or if
- * the row numbers are
- * equal and the first
- * index is smaller.
- */
+ /**
+ * Comparison operator. Result
+ * is true if either the first
+ * row number is smaller or if
+ * the row numbers are equal
+ * and the first index is
+ * smaller.
+ */
bool operator < (const const_iterator&) const;
- /**
- * Exception
- */
+ /**
+ * Exception
+ */
DeclException2 (ExcInvalidIndexWithinRow,
int, int,
<< "Attempt to access element " << arg2
<< " which doesn't have that many elements.");
private:
- /**
- * Store an object of the
- * accessor class.
- */
+ /**
+ * Store an object of the
+ * accessor class.
+ */
Accessor accessor;
};
/**
- * This class implements a wrapper to use the Trilinos distributed sparse matrix
- * class Epetra_FECrsMatrix. This is precisely the kind of matrix we deal with
- * all the time - we most likely get it from some assembly process, where also
- * entries not locally owned might need to be written and hence need to be
- * forwarded to the owner process.
- * This class is designed to be used in a distributed
- * memory architecture with an MPI compiler on the bottom, but works equally
- * well also for serial processes. The only requirement for this class to
- * work is that Trilinos has been installed with the same compiler as is used
- * for generating deal.II.
+ * This class implements a wrapper to use the Trilinos distributed
+ * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind
+ * of matrix we deal with all the time - we most likely get it from
  + * some assembly process, where entries that are not locally owned
  + * might also need to be written and hence be forwarded to the owner
+ * process. This class is designed to be used in a distributed memory
+ * architecture with an MPI compiler on the bottom, but works equally
  + * well for serial processes. The only requirement for this class
+ * to work is that Trilinos has been installed with the same compiler
+ * as is used for generating deal.II.
*
* The interface of this class is modeled after the existing
* SparseMatrix class in deal.II. It has almost the same member
- * functions, and is often exchangable. However, since Trilinos only supports a
- * single scalar type (double), it is not templated, and only works with
- * doubles.
  + * functions, and is often exchangeable. However, since Trilinos only
+ * supports a single scalar type (double), it is not templated, and
+ * only works with doubles.
*
- * Note that Trilinos only guarantees that operations do what you expect if the
- * functions @p GlobalAssemble has been called after matrix assembly.
- * Therefore, you need to call
- * SparseMatrix::compress() before you actually use the matrix. This also
- * calls @p FillComplete that compresses the storage format for sparse
- * matrices by discarding unused elements. Trilinos allows to continue with
- * assembling the matrix after calls to these functions, but since there are
- * no more free entries available after that any more, it is better to only
- * call SparseMatrix::compress() once at the end of the assembly stage and
- * before the matrix is actively used.
+ * Note that Trilinos only guarantees that operations do what you
  + * expect if the function @p GlobalAssemble has been called after
+ * matrix assembly. Therefore, you need to call
+ * SparseMatrix::compress() before you actually use the matrix. This
+ * also calls @p FillComplete that compresses the storage format for
  + * sparse matrices by discarding unused elements. Trilinos allows one
  + * to continue assembling the matrix after calls to these functions,
  + * but since no free entries are available any more after that, it is
  + * better to call SparseMatrix::compress() only once at
+ * the end of the assembly stage and before the matrix is actively
+ * used.
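  + *
  + * A minimal assembly sketch (the matrix, index and value names are
  + * illustrative; they stand for whatever the assembly process produces):
  + * @code
  + * matrix.add (i, j, value);
  + * // ... further set()/add() calls ...
  + * matrix.compress ();   // call once, after all entries are in place
  + * @endcode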
*
* @ingroup TrilinosWrappers
* @ingroup Matrix1
{
public:
/**
- * A structure that describes some of
- * the traits of this class in terms of
- * its run-time behavior. Some other
- * classes (such as the block matrix
- * classes) that take one or other of
- * the matrix classes as its template
- * parameters can tune their behavior
- * based on the variables in this
- * class.
+ * A structure that describes
+ * some of the traits of this
+ * class in terms of its
+ * run-time behavior. Some
+ * other classes (such as the
+ * block matrix classes) that
  + * take one or the other of the
  + * matrix classes as their
  + * template parameters can tune
+ * their behavior based on the
+ * variables in this class.
*/
struct Traits
{
- /**
- * It is safe to elide additions of
- * zeros to individual elements of
- * this matrix.
- */
+ /**
+ * It is safe to elide additions of
+ * zeros to individual elements of
+ * this matrix.
+ */
static const bool zero_addition_can_be_elided = true;
};
typedef MatrixIterators::const_iterator const_iterator;
/**
- * Declare a typedef in analogy to all
- * the other container classes.
+ * Declare a typedef in analogy
+ * to all the other container
+ * classes.
*/
typedef TrilinosScalar value_type;
SparseMatrix ();
/**
- * Constructor using an Epetra_Map
- * and a maximum number of nonzero
- * matrix entries. Note that this
- * number does not need to be exact,
- * and it is even allowed that
- * the actual matrix structure
- * has more nonzero entries than
- * specified in the constructor.
- * However it is still advantageous to
- * provide good estimates here since
- * this will considerably increase
- * the performance of the matrix.
+ * Constructor using an
+ * Epetra_Map and a maximum
+ * number of nonzero matrix
+ * entries. Note that this
+ * number does not need to be
+ * exact, and it is even
+ * allowed that the actual
+ * matrix structure has more
+ * nonzero entries than
+ * specified in the
  + * constructor. However, it is
+ * still advantageous to
+ * provide good estimates here
+ * since this will considerably
+ * increase the performance of
+ * the matrix.
*/
SparseMatrix (const Epetra_Map &InputMap,
const unsigned int n_max_entries_per_row);
/**
- * Same as before, but now use the
- * exact number of nonzeros in each
- * matrix row. Since we know the
- * number of elements in the matrix
- * exactly in this case, we can
- * already allocate the right amount
- * of memory, which makes the
- * creation process including the
- * insertion of nonzero elements
- * by the respective
- * SparseMatrix::reinit call
+ * Same as before, but now use
+ * the exact number of nonzeros
+ * in each matrix row. Since we
+ * know the number of elements
+ * in the matrix exactly in
+ * this case, we can already
+ * allocate the right amount of
+ * memory, which makes the
  + * creation process, including
  + * the insertion of nonzero
  + * elements by the respective
  + * SparseMatrix::reinit call,
* considerably faster.
*/
SparseMatrix (const Epetra_Map &InputMap,
const std::vector<unsigned int> &n_entries_per_row);
/**
- * This function is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows
- * and columns. This interface is
- * meant to be used for generating
- * rectangular matrices, where one
- * map specifies the parallel
- * distribution of rows and the other
- * the one of the columns. This is
- * in contrast to the first
- * constructor, where the same map
- * is used for both the number of
- * rows and the number of columns.
- * The number
- * of columns per row is specified
- * by the maximum number of entries.
+ * This function is similar to
+ * the one above, but it now
+ * takes two different Epetra
+ * maps for rows and
+ * columns. This interface is
+ * meant to be used for
+ * generating rectangular
+ * matrices, where one map
+ * specifies the parallel
+ * distribution of rows and the
+ * other the one of the
+ * columns. This is in contrast
+ * to the first constructor,
+ * where the same map is used
+ * for both the number of rows
+ * and the number of columns.
+ * The number of columns per
+ * row is specified by the
+ * maximum number of entries.
*/
SparseMatrix (const Epetra_Map &InputRowMap,
const Epetra_Map &InputColMap,
const unsigned int n_max_entries_per_row);
/**
- * This function is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows
- * and columns. This interface is
- * meant to be used for generating
- * rectangular matrices, where one
- * map specifies the parallel
- * distribution of rows and the other
- * the one of the columns. The
- * vector n_entries_per_row specifies
- * the number of entries in each
- * row of the newly generated matrix.
+ * This function is similar to
+ * the one above, but it now
+ * takes two different Epetra
+ * maps for rows and
+ * columns. This interface is
+ * meant to be used for
+ * generating rectangular
+ * matrices, where one map
+ * specifies the parallel
+ * distribution of rows and the
+ * other the one of the
+ * columns. The vector
+ * n_entries_per_row specifies
+ * the number of entries in
+ * each row of the newly
+ * generated matrix.
*/
SparseMatrix (const Epetra_Map &InputRowMap,
const Epetra_Map &InputColMap,
const std::vector<unsigned int> &n_entries_per_row);
/**
- * Destructor. Made virtual so that one
- * can use pointers to this class.
+ * Destructor. Made virtual so
+ * that one can use pointers to
+ * this class.
*/
virtual ~SparseMatrix ();
/**
- * Copy the given matrix to this
- * one.
+ * Copy the given matrix to
+ * this one.
*
- * The function returns a reference to
- * <tt>*this</tt>.
+ * The function returns a
+ * reference to <tt>*this</tt>.
*/
SparseMatrix &
copy_from (const SparseMatrix &source);
/**
- * This function initializes the
- * Trilinos matrix with a deal.II
- * sparsity pattern, i.e. it
- * makes the Trilinos Epetra
- * matrix know the position of
- * nonzero entries according to
- * the sparsity pattern. Note that,
- * when using this function, the
- * matrix must already be initialized
- * with a suitable Epetra_Map that
- * describes the distribution of
- * the matrix among the MPI
+ * This function initializes
+ * the Trilinos matrix with a
+ * deal.II sparsity pattern,
+ * i.e. it makes the Trilinos
+ * Epetra matrix know the
+ * position of nonzero entries
+ * according to the sparsity
+ * pattern. Note that, when
+ * using this function, the
+ * matrix must already be
+ * initialized with a suitable
+ * Epetra_Map that describes
+ * the distribution of the
+ * matrix among the MPI
* processes. Otherwise, an
* error will be thrown.
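  + *
  + * A hedged sketch of the intended call sequence (the map, entry
  + * count and sparsity pattern names are illustrative):
  + * @code
  + * TrilinosWrappers::SparseMatrix matrix (map, n_max_entries_per_row);
  + * matrix.reinit (sparsity_pattern);  // matrix already knows its Epetra_Map
  + * @endcode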
*/
void reinit (const SparsityPattern &sparsity_pattern);
/**
- * This function is initializes the
- * Trilinos Epetra matrix according
- * to the specified sparsity_pattern,
- * and also reassigns the matrix
- * rows to different processes
  + * This function initializes
+ * the Trilinos Epetra matrix
+ * according to the specified
+ * sparsity_pattern, and also
+ * reassigns the matrix rows to
+ * different processes
* according to a user-supplied
- * Epetra map. This might be useful
- * when the matrix structure changes,
- * e.g. when the grid is refined.
+ * Epetra map. This might be
+ * useful when the matrix
+ * structure changes, e.g. when
+ * the grid is refined.
*/
void reinit (const Epetra_Map &input_map,
const SparsityPattern &sparsity_pattern);
/**
- * This function is similar to the
- * other initialization function above,
- * but now also reassigns the matrix
- * rows and columns according to
- * two user-supplied Epetra maps.
- * To be used for rectangular
+ * This function is similar to
+ * the other initialization
+ * function above, but now also
+ * reassigns the matrix rows
+ * and columns according to two
+ * user-supplied Epetra maps.
+ * To be used for rectangular
* matrices.
*/
void reinit (const Epetra_Map &input_row_map,
const SparsityPattern &sparsity_pattern);
/**
- * This function copies the
- * content in <tt>sparse_matrix</tt>
- * to the current matrix.
+ * This function copies the
+ * content in
+ * <tt>sparse_matrix</tt> to
+ * the current matrix.
*/
void reinit (const SparseMatrix &sparse_matrix);
/**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries stored
- * therein. It uses a threshold
- * to copy only elements with
- * modulus larger than the
- * threshold (so zeros in the
- * deal.II matrix can be filtered
- * away).
+ * This function initializes
+ * the Trilinos matrix using
+ * the deal.II sparse matrix
+ * and the entries stored
+ * therein. It uses a threshold
+ * to copy only elements with
+ * modulus larger than the
+ * threshold (so zeros in the
+ * deal.II matrix can be
+ * filtered away).
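  + *
  + * A hedged sketch (the map and deal.II matrix names are illustrative):
  + * @code
  + * trilinos_matrix.reinit (map, dealii_matrix, 1e-13);
  + * @endcode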
*/
void reinit (const Epetra_Map &input_map,
const ::dealii::SparseMatrix<double> &dealii_sparse_matrix,
const double drop_tolerance=1e-13);
/**
- * This function is similar to the
- * other initialization function with
- * deal.II sparse matrix input above,
- * but now takes Epetra maps for both
- * the rows and the columns of the
- * matrix.
- * To be used for rectangular
- * matrices.
+ * This function is similar to
+ * the other initialization
+ * function with deal.II sparse
+ * matrix input above, but now
+ * takes Epetra maps for both
+ * the rows and the columns of
+ * the matrix. To be used for
+ * rectangular matrices.
*/
void reinit (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
const double drop_tolerance=1e-13);
/**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor.
+ * Release all memory and
+ * return to a state just like
+ * after having called the
+ * default constructor.
*/
void clear ();
/**
- * Trilinos matrices store their own
- * sparsity patterns. So, in analogy to
- * our own SparsityPattern class,
- * this function compresses the
- * sparsity pattern and allows the
- * resulting matrix to be used in all
- * other operations where before only
- * assembly functions were
- * allowed. This function must
- * therefore be called once you have
+ * Trilinos matrices store
+ * their own sparsity
+ * patterns. So, in analogy to
+ * our own SparsityPattern
+ * class, this function
+ * compresses the sparsity
+ * pattern and allows the
+ * resulting matrix to be used
+ * in all other operations
+ * where before only assembly
+ * functions were allowed. This
+ * function must therefore be
+ * called once you have
* assembled the matrix.
*/
void compress ();
/**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keeps the sparsity pattern
+ * This operator assigns a
+ * scalar to a matrix. Since
  + * this usually does not make
+ * much sense (should we set
+ * all matrix entries to this
+ * value? Only the nonzero
+ * entries of the sparsity
+ * pattern?), this operation is
+ * only allowed if the actual
+ * value to be assigned is
+ * zero. This operator only
+ * exists to allow for the
+ * obvious notation
+ * <tt>matrix=0</tt>, which
+ * sets all elements of the
+ * matrix to zero, but keeps
+ * the sparsity pattern
* previously used.
*/
SparseMatrix &
* Set the element (<i>i,j</i>)
* to @p value.
*
- * This function
- * adds a new entry to the
- * matrix if it didn't exist
- * before, very much in
- * contrast to the SparseMatrix
- * class which throws an error
- * if the entry does not exist.
- * If <tt>value</tt> is not a
+ * This function adds a new
+ * entry to the matrix if it
+ * didn't exist before, very
+ * much in contrast to the
+ * SparseMatrix class which
+ * throws an error if the entry
+ * does not exist. If
+ * <tt>value</tt> is not a
* finite number an exception
* is thrown.
*/
const TrilinosScalar value);
/**
- * Add @p value to the
- * element (<i>i,j</i>).
+ * Add @p value to the element
+ * (<i>i,j</i>).
*
- * This function
- * adds a new entry to the
- * matrix if it didn't exist
- * before, very much in
- * contrast to the SparseMatrix
- * class which throws an error
- * if the entry does not exist.
- * If <tt>value</tt> is not a
+ * This function adds a new
+ * entry to the matrix if it
+ * didn't exist before, very
+ * much in contrast to the
+ * SparseMatrix class which
+ * throws an error if the entry
+ * does not exist. If
+ * <tt>value</tt> is not a
* finite number an exception
* is thrown.
*/
* pattern, though (but retains
* the allocated memory in case
* new entries are again added
- * later). Note that this
- * is a global operation, so this
- * needs to be done on all
- * MPI processes.
+ * later). Note that this is a
+ * global operation, so this
+ * needs to be done on all MPI
+ * processes.
*
* This operation is used in
- * eliminating constraints (e.g. due to
- * hanging nodes) and makes sure that
- * we can write this modification to
- * the matrix without having to read
- * entries (such as the locations of
- * non-zero elements) from it --
- * without this o peration, removing
- * constraints on parallel matrices is
- * a rather complicated procedure.
+ * eliminating constraints
+ * (e.g. due to hanging nodes)
+ * and makes sure that we can
+ * write this modification to
+ * the matrix without having to
+ * read entries (such as the
+ * locations of non-zero
+ * elements) from it -- without
  + * this operation, removing
+ * constraints on parallel
+ * matrices is a rather
+ * complicated procedure.
*
- * The second parameter can be used to
- * set the diagonal entry of this row
- * to a value different from zero. The
- * default is to set it to zero.
+ * The second parameter can be
+ * used to set the diagonal
+ * entry of this row to a value
+ * different from zero. The
+ * default is to set it to
+ * zero.
*/
void clear_row (const unsigned int row,
const TrilinosScalar new_diag_value = 0);
/**
- * Same as clear_row(), except that it
- * works on a number of rows at once.
+ * Same as clear_row(), except
+ * that it works on a number of
+ * rows at once.
*
- * The second parameter can be used to
- * set the diagonal entries of all
- * cleared rows to something different
- * from zero. Note that all of these
- * diagonal entries get the same value
- * -- if you want different values for
- * the diagonal entries, you have to
- * set them by hand.
+ * The second parameter can be
+ * used to set the diagonal
+ * entries of all cleared rows
+ * to something different from
+ * zero. Note that all of these
+ * diagonal entries get the
+ * same value -- if you want
+ * different values for the
+ * diagonal entries, you have
+ * to set them by hand.
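  + *
  + * A hedged usage sketch (the vector of constrained row indices is
  + * assumed to exist already):
  + * @code
  + * matrix.clear_rows (constrained_rows, 1.);  // rows zeroed, diagonal set to 1
  + * @endcode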
*/
void clear_rows (const std::vector<unsigned int> &rows,
const TrilinosScalar new_diag_value = 0);
/**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you should
- * always take care where to call this
- * function. In contrast to the
- * respective function in the
- * @p SparseMatrix class, we don't
- * throw an exception if the respective
- * entry doesn't exist in the sparsity
- * pattern of this class, since Trilinos
- * does not transmit this information.
- * On the other hand, an exception
- * will be thrown when the requested
- * element is not saved on the calling
- * process.
+ * Return the value of the
+ * entry (<i>i,j</i>). This
+ * may be an expensive
+ * operation and you should
+ * always take care where to
+ * call this function. In
+ * contrast to the respective
  + * function in the
  + * @p SparseMatrix class, we don't
+ * throw an exception if the
+ * respective entry doesn't
+ * exist in the sparsity
+ * pattern of this class, since
+ * Trilinos does not transmit
+ * this information. On the
+ * other hand, an exception
+ * will be thrown when the
+ * requested element is not
+ * saved on the calling
+ * process.
*
- * This function is therefore exactly
- * equivalent to the <tt>el()</tt> function.
+ * This function is therefore
+ * exactly equivalent to the
+ * <tt>el()</tt> function.
*/
TrilinosScalar operator () (const unsigned int i,
const unsigned int j) const;
/**
- * Return the value of the matrix entry
- * (<i>i,j</i>). If this entry does not
- * exist in the sparsity pattern, then
- * zero is returned. While this may be
- * convenient in some cases, note that
- * it is simple to write algorithms
- * that are slow compared to an optimal
- * solution, since the sparsity of the
- * matrix is not used.
+ * Return the value of the
+ * matrix entry
+ * (<i>i,j</i>). If this entry
+ * does not exist in the
+ * sparsity pattern, then zero
+ * is returned. While this may
+ * be convenient in some cases,
+ * note that it is simple to
+ * write algorithms that are
+ * slow compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
*/
TrilinosScalar el (const unsigned int i,
const unsigned int j) const;
* row. This function throws an
* error if the matrix is not
* quadratic.
- *
- * TODO: Trilinos can access the
- * diagonal faster. Implement this!
*/
TrilinosScalar diag_element (const unsigned int i) const;
/**
- * Return the number of rows in this
- * matrix.
+ * Return the number of rows in
+ * this matrix.
*/
unsigned int m () const;
/**
- * Return the number of columns in this
- * matrix.
+ * Return the number of columns
+ * in this matrix.
*/
unsigned int n () const;
/**
- * Return the local dimension of the
- * matrix, i.e. the number of rows
- * stored on the present MPI
- * process. For sequential matrices,
- * this number is the same as m(),
- * but for parallel matrices it may be
- * smaller.
+ * Return the local dimension
+ * of the matrix, i.e. the
+ * number of rows stored on the
+ * present MPI process. For
+ * sequential matrices, this
+ * number is the same as m(),
+ * but for parallel matrices it
+ * may be smaller.
*
* To figure out which elements
* exactly are stored locally,
* indicating which rows of
* this matrix are stored
* locally. The first number is
- * the index of the first
- * row stored, the second
- * the index of the one past
- * the last one that is stored
- * locally. If this is a
- * sequential matrix, then the
- * result will be the pair
- * (0,m()), otherwise it will be
- * a pair (i,i+n), where
+ * the index of the first row
+ * stored, the second the index
+ * of the one past the last one
+ * that is stored locally. If
+ * this is a sequential matrix,
+ * then the result will be the
+ * pair (0,m()), otherwise it
+ * will be a pair (i,i+n),
+ * where
* <tt>n=local_size()</tt>.
*/
std::pair<unsigned int, unsigned int>
/**
* Return the number of nonzero
- * elements of this
- * matrix.
+ * elements of this matrix.
*/
unsigned int n_nonzero_elements () const;
/**
- * Number of entries in a specific row.
+ * Number of entries in a
+ * specific row.
*/
unsigned int row_length (const unsigned int row) const;
/**
- * Return the l1-norm of the matrix, that is
- * $|M|_1=max_{all columns j}\sum_{all
- * rows i} |M_ij|$,
- * (max. sum of columns).
- * This is the
- * natural matrix norm that is compatible
- * to the l1-norm for vectors, i.e.
+ * Return the l1-norm of the
+ * matrix, that is
  + * $|M|_1 = \max_j \sum_i |M_{ij}|$
  + * (max. sum of columns). This
  + * is the natural matrix norm
  + * that is compatible with the
+ * l1-norm for vectors, i.e.
* $|Mv|_1\leq |M|_1 |v|_1$.
* (cf. Haemmerlin-Hoffmann:
* Numerische Mathematik)
TrilinosScalar l1_norm () const;
/**
- * Return the linfty-norm of the
- * matrix, that is
- * $|M|_infty=max_{all rows i}\sum_{all
- * columns j} |M_ij|$,
- * (max. sum of rows).
- * This is the
- * natural matrix norm that is compatible
- * to the linfty-norm of vectors, i.e.
- * $|Mv|_infty \leq |M|_infty |v|_infty$.
+ * Return the linfty-norm of
+ * the matrix, that is
  + * $|M|_\infty = \max_i \sum_j
  + * |M_{ij}|$ (max. sum of rows).
  + * This is the natural matrix
  + * norm that is compatible with
  + * the linfty-norm of vectors,
  + * i.e. $|Mv|_\infty \leq
  + * |M|_\infty |v|_\infty$.
* (cf. Haemmerlin-Hoffmann:
* Numerische Mathematik)
*/
TrilinosScalar linfty_norm () const;
/**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
  + * Return the Frobenius norm of
+ * the matrix, i.e. the square
+ * root of the sum of squares
+ * of all entries in the
* matrix.
*/
TrilinosScalar frobenius_norm () const;
/**
- * Multiply the entire matrix by a
- * fixed factor.
+ * Multiply the entire matrix
+ * by a fixed factor.
*/
SparseMatrix & operator *= (const TrilinosScalar factor);
/**
- * Divide the entire matrix by a
- * fixed factor.
+ * Divide the entire matrix by
+ * a fixed factor.
*/
SparseMatrix & operator /= (const TrilinosScalar factor);
/**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
+ * Matrix-vector
+ * multiplication: let <i>dst =
+ * M*src</i> with <i>M</i>
+ * being this matrix.
*
* Source and destination must
* not be the same vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
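  + *
  + * A hedged sketch (assuming dst and src are TrilinosWrappers::Vector
  + * objects built on the same Epetra_Map as the matrix; the names and
  + * the map-based vector constructor are illustrative):
  + * @code
  + * TrilinosWrappers::Vector dst (map), src (map);
  + * // ... fill src ...
  + * matrix.vmult (dst, src);   // dst = M * src
  + * @endcode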
*/
void vmult (Vector &dst,
const Vector &src) const;
/**
- * Matrix-vector multiplication: let
- * <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this matrix. This
- * function does the same as vmult()
- * but takes the transposed matrix.
+ * Matrix-vector
+ * multiplication: let <i>dst =
+ * M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does
+ * the same as vmult() but
+ * takes the transposed matrix.
*
* Source and destination must
* not be the same vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
*/
void Tvmult (Vector &dst,
const Vector &src) const;
* Source and destination must
* not be the same vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
*/
void vmult_add (Vector &dst,
const Vector &src) const;
* Adding Matrix-vector
* multiplication. Add
* <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
+ * <i>dst</i> with <i>M</i>
+ * being this matrix. This
+ * function does the same as
+ * vmult_add() but takes the
+ * transposed matrix.
*
* Source and destination must
* not be the same vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
*/
void Tvmult_add (Vector &dst,
const Vector &src) const;
/**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
+ * Return the square of the
+ * norm of the vector $v$ with
+ * respect to the norm induced
+ * by this matrix,
* i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
+ * is useful, e.g. in the
+ * finite element context,
+ * where the $L_2$ norm of a
+ * function equals the matrix
+ * norm with respect to the
+ * mass matrix of the vector
+ * representing the nodal
+ * values of the finite element
+ * function.
*
- * Obviously, the matrix needs to
- * be quadratic for this operation.
+ * Obviously, the matrix needs
+ * to be quadratic for this
+ * operation.
*
- * The implementation of this function
- * is not as efficient as the one in
- * the @p SparseMatrix class used in
- * deal.II (i.e. the original one, not
- * the Trilinos wrapper class) since Trilinos
- * doesn't support this operation and
- * needs a temporary vector.
+ * The implementation of this
+ * function is not as efficient
  + * as the one in the
  + * @p SparseMatrix class used in
+ * deal.II (i.e. the original
+ * one, not the Trilinos
+ * wrapper class) since
+ * Trilinos doesn't support
+ * this operation and needs a
+ * temporary vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
*/
TrilinosScalar matrix_norm_square (const Vector &v) const;
* Compute the matrix scalar
* product $\left(u,Mv\right)$.
*
- * The implementation of this function
- * is not as efficient as the one in
- * the @p SparseMatrix class used in
- * deal.II (i.e. the original one, not
- * the Trilinos wrapper class) since Trilinos
- * doesn't support this operation and
- * needs a temporary vector.
+ * The implementation of this
+ * function is not as efficient
  + * as the one in the
  + * @p SparseMatrix class used in
+ * deal.II (i.e. the original
+ * one, not the Trilinos
+ * wrapper class) since
+ * Trilinos doesn't support
+ * this operation and needs a
+ * temporary vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
*/
TrilinosScalar matrix_scalar_product (const Vector &u,
const Vector &v) const;
/**
* Compute the residual of an
* equation <i>Mx=b</i>, where
- * the residual is defined to be
- * <i>r=b-Mx</i>. Write the
- * residual into
- * @p dst. The
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
* <i>l<sub>2</sub></i> norm of
* the residual vector is
* returned.
*
- * Source <i>x</i> and destination
- * <i>dst</i> must not be the same
- * vector.
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
*
- * Note that both vectors have to be
- * distributed vectors generated using
- * the same Map as was used for the
- * matrix in case you work on a
- * distributed memory architecture,
- * using the interface of the
- * TrilinosWrappers::Vector class.
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface of the
+ * TrilinosWrappers::Vector
+ * class.
*/
TrilinosScalar residual (Vector &dst,
const Vector &x,
const Vector &b) const;
/**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>.
+ * Add <tt>matrix</tt> scaled
+ * by <tt>factor</tt> to this
+ * matrix, i.e. the matrix
+ * <tt>factor*matrix</tt> is
+ * added to <tt>this</tt>.
*/
void add (const TrilinosScalar factor,
const SparseMatrix &matrix);
* STL-like iterator with the
* first entry of row @p r.
*
- * Note that if the given row is empty,
- * i.e. does not contain any nonzero
- * entries, then the iterator returned by
- * this function equals
- * <tt>end(r)</tt>. Note also that the
- * iterator may not be dereferencable in
- * that case.
+ * Note that if the given row
+ * is empty, i.e. does not
+ * contain any nonzero entries,
+ * then the iterator returned
+ * by this function equals
+ * <tt>end(r)</tt>. Note also
+ * that the iterator may not be
  + * dereferenceable in that case.
*/
const_iterator begin (const unsigned int r) const;
/**
- * Final iterator of row <tt>r</tt>. It
- * points to the first element past the
- * end of line @p r, or past the end of
- * the entire sparsity pattern.
+ * Final iterator of row
+ * <tt>r</tt>. It points to the
+ * first element past the end
+ * of line @p r, or past the
+ * end of the entire sparsity
+ * pattern.
*
- * Note that the end iterator is not
- * necessarily dereferencable. This is in
- * particular the case if it is the end
- * iterator for the last row of a matrix.
+ * Note that the end iterator
+ * is not necessarily
  + * dereferenceable. This is in
+ * particular the case if it is
+ * the end iterator for the
+ * last row of a matrix.
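  + *
  + * A typical loop over the locally stored entries of one row might
  + * look like this (sketch; the matrix and row names are illustrative):
  + * @code
  + * for (SparseMatrix::const_iterator p = matrix.begin(r);
  + *      p != matrix.end(r); ++p)
  + *   std::cout << p->column() << ": " << p->value() << std::endl;
  + * @endcode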
*/
const_iterator end (const unsigned int r) const;
- /**
- * Make an in-place transpose of a
- * matrix.
- */
+ /**
+ * Make an in-place transpose
+ * of a matrix.
+ */
void transpose ();
- /**
- * Test whether a matrix is symmetric.
- * Default tolerance is zero.
- * TODO: Not implemented.
- */
+ /**
+ * Test whether a matrix is
+ * symmetric. Default
+ * tolerance is zero. TODO:
+ * Not implemented.
+ */
bool is_symmetric (const double tol = 0.0);
- /**
- * Test whether a matrix is Hermitian,
- * i.e. it is the complex conjugate
- * of its transpose.
- * TODO: Not implemented.
- */
+ /**
+ * Test whether a matrix is
+ * Hermitian, i.e. it is the
+ * complex conjugate of its
+ * transpose. TODO: Not
+ * implemented.
+ */
bool is_hermitian ();
- /**
- * Abstract Trilinos object that helps view
- * in ASCII other Trilinos objects. Currently
- * this function is not implemented.
- * TODO: Not implemented.
- */
+ /**
+ * Abstract Trilinos object
  + * that helps to view other
  + * Trilinos objects in ASCII
  + * format. Currently this
+ * function is not
+ * implemented. TODO: Not
+ * implemented.
+ */
void write_ascii ();
/**
- * Print the matrix to the given
- * stream, using the format
- * <tt>(line,col) value</tt>,
- * i.e. one nonzero entry of the
- * matrix per line.
+ * Print the matrix to the
+ * given stream, using the
+ * format <tt>(line,col)
+ * value</tt>, i.e. one nonzero
+ * entry of the matrix per
+ * line.
*/
void print (std::ostream &out) const;
private:
/**
- * Epetra Trilinos mapping of the
- * matrix rows that
- * assigns parts of the matrix to
- * the individual processes.
- * TODO: is it possible to only use
- * a pointer?
- */
- Epetra_Map row_map;
+ * Dummy pointer that is used
+ * internally in the
+ * constructor for empty
+ * matrices.
+ */
+ const Epetra_Map *dummy_map;
/**
- * Pointer to the user-supplied
- * Epetra Trilinos mapping of the
- * matrix columns that
- * assigns parts of the matrix to
- * the individual processes.
+ * Pointer to Epetra Trilinos
+ * mapping of the matrix rows
+ * that assigns parts of the
+ * matrix to the individual
+ * processes. This map is
+ * provided either via the
+ * constructor or in a reinit
+ * function.
*/
- Epetra_Map col_map;
+ Epetra_Map *row_map;
/**
- * Trilinos doesn't allow to mix additions
- * to matrix entries and overwriting
- * them (to make synchronisation of
+ * Pointer to the user-supplied
+ * Epetra Trilinos mapping of
+ * the matrix columns that
+ * assigns parts of the matrix
+ * to the individual processes.
+ */
+ Epetra_Map *col_map;
+
+ /**
  + * Trilinos doesn't allow one
  + * to mix additions to matrix
+ * entries and overwriting them
+ * (to make synchronisation of
* parallel computations
- * simpler). The way we do it is to,
- * for each access operation, store
- * whether it is an insertion or an
- * addition. If the previous one was of
- * different type, then we first have
- * to flush the Trilinos buffers;
- * otherwise, we can simply go on.
- * Luckily, Trilinos has an object
- * for this which does already all
- * the parallel communications in
- * such a case, so we simply use their
- * model, which stores whether the
- * last operation was an addition
- * or an insertion.
+ * simpler). The way we do it
+ * is to, for each access
+ * operation, store whether it
+ * is an insertion or an
+ * addition. If the previous
+ * one was of different type,
+ * then we first have to flush
+ * the Trilinos buffers;
+ * otherwise, we can simply go
+ * on. Luckily, Trilinos has
+ * an object for this which
  + * already does all the
+ * parallel communications in
+ * such a case, so we simply
+ * use their model, which
+ * stores whether the last
+ * operation was an addition or
+ * an insertion.
*/
Epetra_CombineMode last_action;
public:
/**
* A sparse matrix object in
- * Trilinos to be used for
- * finite element based problems
- * which allows for assembling into
- * non-local elements.
- * The actual type, a sparse
- * matrix, is set in the constructor.
+ * Trilinos to be used for
+ * finite element based
+ * problems which allows for
+ * assembling into non-local
+ * elements. The actual type,
+ * a sparse matrix, is set in
+ * the constructor.
*/
std::auto_ptr<Epetra_FECrsMatrix> matrix;
inline
const_iterator::Accessor::
- Accessor (const SparseMatrix *matrix,
+ Accessor (const SparseMatrix *matrix,
const unsigned int row,
const unsigned int index)
:
// forward declaration
class Vector;
- /**
- * @cond internal
- */
+ /**
+ * @cond internal
+ */
/**
- * A namespace for internal implementation details of the TrilinosWrapper
- * members.
- * @ingroup TrilinosWrappers
  + * A namespace for internal implementation details of the
  + * TrilinosWrappers members.
  + *
  + * @ingroup TrilinosWrappers
*/
namespace internal
{
- /**
- * This class implements a wrapper for
- * accessing the Trilinos vector
- * in the same way as we access
- * deal.II objects:
- * it is initialized with a vector and an
- * element within it, and has a
- * conversion operator to extract the
- * scalar value of this element. It also
- * has a variety of assignment operator
- * for writing to this one element.
- * @ingroup TrilinosWrappers
- */
+ /**
+ * This class implements a
+ * wrapper for accessing the
+ * Trilinos vector in the same
+ * way as we access deal.II
+ * objects: it is initialized
+ * with a vector and an element
+ * within it, and has a
+ * conversion operator to
+ * extract the scalar value of
  + * this element. It also has a
  + * variety of assignment
  + * operators for writing to
  + * this one element.
  + *
  + * @ingroup TrilinosWrappers
+ */
class VectorReference
{
private:
- /**
- * Constructor. It is made private so
- * as to only allow the actual vector
- * class to create it.
- */
+ /**
+ * Constructor. It is made
+ * private so as to only allow
+ * the actual vector class to
+ * create it.
+ */
VectorReference (Vector &vector,
const unsigned int index);
public:
- /**
- * This looks like a copy operator,
- * but does something different than
- * usual. In particular, it does not
- * copy the member variables of this
- * reference. Rather, it handles the
- * situation where we have two
- * vectors @p v and @p w, and assign
- * elements like in
- * <tt>v(i)=w(i)</tt>. Here, both
- * left and right hand side of the
- * assignment have data type
- * VectorReference, but what we
- * really mean is to assign the
- * vector elements represented by the
- * two references. This operator
- * implements this operation. Note
- * also that this allows us to make
- * the assignment operator const.
- */
+ /**
+ * This looks like a copy
+ * operator, but does something
+ * different than usual. In
+ * particular, it does not copy
+ * the member variables of this
+ * reference. Rather, it
+ * handles the situation where
+ * we have two vectors @p v and
+ * @p w, and assign elements
+ * like in
+ * <tt>v(i)=w(i)</tt>. Here,
+ * both left and right hand
+ * side of the assignment have
+ * data type VectorReference,
+ * but what we really mean is
+ * to assign the vector
+ * elements represented by the
+ * two references. This
+ * operator implements this
+ * operation. Note also that
+ * this allows us to make the
+ * assignment operator const.
+ */
const VectorReference & operator = (const VectorReference &r) const;
- /**
- * Set the referenced element of the
- * vector to <tt>s</tt>.
- */
+ /**
+ * Set the referenced element of the
+ * vector to <tt>s</tt>.
+ */
const VectorReference & operator = (const TrilinosScalar &s) const;
- /**
- * Add <tt>s</tt> to the referenced
- * element of the vector->
- */
+ /**
+ * Add <tt>s</tt> to the
+ * referenced element of the
  + * vector.
+ */
const VectorReference & operator += (const TrilinosScalar &s) const;
- /**
- * Subtract <tt>s</tt> from the
- * referenced element of the vector->
- */
+ /**
+ * Subtract <tt>s</tt> from the
+ * referenced element of the
  + * vector.
+ */
const VectorReference & operator -= (const TrilinosScalar &s) const;
- /**
- * Multiply the referenced element of
- * the vector by <tt>s</tt>.
- */
+ /**
+ * Multiply the referenced
+ * element of the vector by
+ * <tt>s</tt>.
+ */
const VectorReference & operator *= (const TrilinosScalar &s) const;
- /**
- * Divide the referenced element of
- * the vector by <tt>s</tt>.
- */
+ /**
+ * Divide the referenced
+ * element of the vector by
+ * <tt>s</tt>.
+ */
const VectorReference & operator /= (const TrilinosScalar &s) const;
- /**
- * Convert the reference to an actual
- * value, i.e. return the value of
- * the referenced element of the
- * vector.
- */
+ /**
+ * Convert the reference to an
+ * actual value, i.e. return
+ * the value of the referenced
+ * element of the vector.
+ */
operator TrilinosScalar () const;
- /**
- * Exception
- */
+ /**
+ * Exception
+ */
DeclException1 (ExcTrilinosError,
int,
<< "An error with error number " << arg1
<< " occured while calling a Trilinos function");
- /**
- * Exception
- */
+ /**
+ * Exception
+ */
DeclException3 (ExcAccessToNonLocalElement,
int, int, int,
<< "You tried to access element " << arg1
<< " are stored locally and can be accessed.");
private:
- /**
- * Point to the vector we are
- * referencing.
- */
+ /**
+ * Point to the vector we are
+ * referencing.
+ */
Vector &vector;
- /**
- * Index of the referenced element
- * of the vector.
- */
+ /**
+ * Index of the referenced element
+ * of the vector.
+ */
const unsigned int index;
- /**
- * Make the vector class a friend, so
- * that it can create objects of the
- * present type.
- */
+ /**
+ * Make the vector class a
+ * friend, so that it can
+ * create objects of the
+ * present type.
+ */
friend class ::dealii::TrilinosWrappers::Vector;
};
}
- /**
- * @endcond
- */
+ /**
+ * @endcond
+ */
/**
- * This class implements a wrapper to use the Trilinos distributed vector
- * class Epetra_FEVector. This is precisely the kind of vector we deal with
- * all the time - we probably get it from some assembly process, where also
- * entries not locally owned might need to written and hence need to be
- * forwarded to the owner. This class is designed to be used in a distributed
- * memory architecture with an MPI compiler on the bottom, but works equally
- * well also for serial processes. The only requirement for this class to
- * work is that Trilinos is installed with the respective compiler as a
- * basis.
+ * This class implements a wrapper to use the Trilinos distributed
+ * vector class Epetra_FEVector. This is precisely the kind of vector
+ * we deal with all the time - we probably get it from some assembly
  + * process, where entries that are not locally owned might also need to
  + * be written and hence be forwarded to the owner. This class is designed
+ * to be used in a distributed memory architecture with an MPI
+ * compiler on the bottom, but works equally well also for serial
+ * processes. The only requirement for this class to work is that
  + * Trilinos is installed with the same compiler as is used for deal.II.
*
* The interface of this class is modeled after the existing Vector
- * class in deal.II. It has almost the same member functions, and is often
- * exchangable. However, since Trilinos only supports a single scalar type
- * (double), it is not templated, and only works with that type.
+ * class in deal.II. It has almost the same member functions, and is
  + * often exchangeable. However, since Trilinos only supports a single
+ * scalar type (double), it is not templated, and only works with that
+ * type.
*
- * Note that Trilinos only guarantees that operations do what you expect if the
- * function @p GlobalAssemble has been called after vector assembly in order to
- * distribute the data. Therefore, you need to call Vector::compress()
- * before you actually use the vectors.
+ * Note that Trilinos only guarantees that operations do what you
+ * expect if the function @p GlobalAssemble has been called after
+ * vector assembly in order to distribute the data. Therefore, you
+ * need to call Vector::compress() before you actually use the
+ * vectors.
*
* <h3>Parallel communication model</h3>
*
- * The parallel functionality of Trilinos is built on top of the Message Passing
- * Interface (MPI). MPI's communication model is built on collective
- * communications: if one process wants something from another, that other
- * process has to be willing to accept this communication. A process cannot
- * query data from another process by calling a remote function, without that
- * other process expecting such a transaction. The consequence is that most of
- * the operations in the base class of this class have to be called
- * collectively. For example, if you want to compute the l2 norm of a parallel
- * vector, @em all processes across which this vector is shared have to call
- * the @p l2_norm function. If you don't do this, but instead only call the @p
- * l2_norm function on one process, then the following happens: This one
- * process will call one of the collective MPI functions and wait for all the
- * other processes to join in on this. Since the other processes don't call
- * this function, you will either get a time-out on the first process, or,
- * worse, by the time the next a callto a Trilinos function generates an MPI
- * message on the other processes , you will get a cryptic message that only a
- * subset of processes attempted a communication. These bugs can be very hard
- * to figure out, unless you are well-acquainted with the communication model
- * of MPI, and know which functions may generate MPI messages.
+ * The parallel functionality of Trilinos is built on top of the
+ * Message Passing Interface (MPI). MPI's communication model is built
+ * on collective communications: if one process wants something from
+ * another, that other process has to be willing to accept this
+ * communication. A process cannot query data from another process by
+ * calling a remote function, without that other process expecting
+ * such a transaction. The consequence is that most of the operations
+ * in the base class of this class have to be called collectively. For
+ * example, if you want to compute the l2 norm of a parallel vector,
+ * @em all processes across which this vector is shared have to call
+ * the @p l2_norm function. If you don't do this, but instead only
+ * call the @p l2_norm function on one process, then the following
+ * happens: This one process will call one of the collective MPI
+ * functions and wait for all the other processes to join in on
+ * this. Since the other processes don't call this function, you will
+ * either get a time-out on the first process, or, worse, by the time
+ * the next call to a Trilinos function generates an MPI message on
+ * the other processes, you will get a cryptic message that only a
+ * subset of processes attempted a communication. These bugs can be
+ * very hard to figure out, unless you are well-acquainted with the
+ * communication model of MPI, and know which functions may generate
+ * MPI messages.
*
- * One particular case, where an MPI message may be generated unexpectedly is
- * discussed below.
+ * One particular case where an MPI message may be generated
+ * unexpectedly is discussed below.
*
* <h3>Accessing individual elements of a vector</h3>
*
- * Trilinos does allow read access to individual elements of a vector, but in the
- * distributed case only to elements that are stored locally. We implement
- * this through calls like <tt>d=vec(i)</tt>. However, if you access an
- * element outside the locally stored range, an exception is generated.
+ * Trilinos does allow read access to individual elements of a vector,
+ * but in the distributed case only to elements that are stored
+ * locally. We implement this through calls like
+ * <tt>d=vec(i)</tt>. However, if you access an element outside the
+ * locally stored range, an exception is generated.
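+ *
+ * A minimal sketch, assuming that the index <tt>i</tt> happens to
+ * correspond to an element stored on the current process while
+ * <tt>j</tt> does not:
+ * @verbatim
+ * const TrilinosScalar d = vec(i);   // fine, element is local
+ * const TrilinosScalar e = vec(j);   // generates an exception
+ * @endverbatim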
*
- * In contrast to read access, Trilinos (and the respective deal.II wrapper
- * classes) allow to write (or add) to individual elements of vectors, even if
- * they are stored on a different process. You can do this writing, for
- * example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or similar
- * operations. There is one catch, however, that may lead to very confusing
- * error messages: Trilinos requires application programs to call the compress()
- * function when they switch from adding, to elements to writing to
- * elements. The reasoning is that all processes might accumulate addition
- * operations to elements, even if multiple processes write to the same
- * elements. By the time we call compress() the next time, all these additions
- * are executed. However, if one process adds to an element, and another
- * overwrites to it, the order of execution would yield non-deterministic
- * behavior if we don't make sure that a synchronisation with compress()
- * happens in between.
+ * In contrast to read access, Trilinos (and the respective deal.II
+ * wrapper classes) allow one to write (or add) to individual
+ * elements of vectors, even if they are stored on a different
+ * process. You can do this by writing, for example,
+ * <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>, or by similar
+ * operations. There is one catch, however, that may lead to very
+ * confusing error messages: Trilinos requires application programs
+ * to call the compress() function when they switch from adding to
+ * elements to writing to elements. The reasoning is that
+ * all processes might accumulate addition operations to elements,
+ * even if multiple processes write to the same elements. By the time
+ * we call compress() the next time, all these additions are
+ * executed. However, if one process adds to an element, and another
+ * overwrites to it, the order of execution would yield
+ * non-deterministic behavior if we don't make sure that a
+ * synchronisation with compress() happens in between.
*
- * In order to make sure these calls to compress() happen at the appropriate
- * time, the deal.II wrappers keep a state variable that store which is the
- * presently allowed operation: additions or writes. If it encounters an
- * operation of the opposite kind, it calls compress() and flips the
- * state. This can sometimes lead to very confusing behavior, in code that may
- * for example look like this:
- * @verbatim
- * TrilinosWrappers::Vector vector;
- * ...
- * // do some write operations on the vector
- * for (unsigned int i=0; i<vector->size(); ++i)
- * vector(i) = i;
+ * In order to make sure these calls to compress() happen at the
+ * appropriate time, the deal.II wrappers keep a state variable that
+ * stores which operation is presently allowed: additions or
+ * writes. If it encounters an operation of the opposite kind, it
+ * calls compress() and flips the state. This can sometimes lead to
+ * very confusing behavior, in code that may for example look like
+ * this:
+ * @verbatim
+ * TrilinosWrappers::Vector vector;
+ * ...
+ * // do some write operations on the vector
+ * for (unsigned int i=0; i<vector->size(); ++i)
+ *   vector(i) = i;
*
* // do some additions to vector elements, but
* // only for some elements
* const double norm = vector->l2_norm();
* @endverbatim
*
- * This code can run into trouble: by the time we see the first addition
- * operation, we need to flush the overwrite buffers for the vector, and the
- * deal.II library will do so by calling compress(). However, it will only do
- * so for all processes that actually do an addition -- if the condition is
- * never true for one of the processes, then this one will not get to the
- * actual compress() call, whereas all the other ones do. This gets us into
- * trouble, since all the other processes hang in the call to flush the write
- * buffers, while the one other process advances to the call to compute the l2
- * norm. At this time, you will get an error that some operation was attempted
- * by only a subset of processes. This behavior may seem surprising, unless
- * you know that write/addition operations on single elements may trigger this
- * behavior.
+ * This code can run into trouble: by the time we see the first
+ * addition operation, we need to flush the overwrite buffers for the
+ * vector, and the deal.II library will do so by calling
+ * compress(). However, it will only do so on those processes that
+ * actually perform an addition -- if the condition is never true for
+ * one of the processes, then this one will not get to the actual
+ * compress() call, whereas all the other ones do. This gets us into
+ * trouble, since all the other processes hang in the call to flush
+ * the write buffers, while the remaining process advances to the call
+ * to compute the l2 norm. At this time, you will get an error that
+ * some operation was attempted by only a subset of processes. This
+ * behavior may seem surprising, unless you know that write/addition
+ * operations on single elements may trigger this behavior.
*
- * The problem described here may be avoided by placing additional calls to
- * compress(), or making sure that all processes do the same type of
- * operations at the same time, for example by placing zero additions if
- * necessary.
+ * The problem described here may be avoided by placing additional
+ * calls to compress(), or making sure that all processes do the same
+ * type of operations at the same time, for example by placing zero
+ * additions if necessary.
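+ *
+ * For the example above, a sketch of the first remedy might look as
+ * follows:
+ * @verbatim
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ *   vector(i) = i;
+ *
+ * vector.compress ();   // collective call, executed on all processes
+ *
+ * // conditional additions to individual elements may follow here
+ * ...
+ * const double norm = vector.l2_norm();
+ * @endverbatim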
*
* @ingroup TrilinosWrappers
* @ingroup Vectors
{
public:
/**
- * Declare some of the standard types
- * used in all containers. These types
- * parallel those in the <tt>C</tt>
- * standard libraries <tt>vector<...></tt>
- * class.
+ * Declare some of the standard
+ * types used in all
+ * containers. These types
+ * parallel those in the
+       * <tt>C++</tt> standard
+       * library's
+ * <tt>vector<...></tt> class.
*/
typedef TrilinosScalar value_type;
typedef TrilinosScalar real_type;
typedef const internal::VectorReference const_reference;
/**
- * Default constructor. It doesn't do
- * anything, derived classes will have
- * to initialize the data.
+ * Default constructor. It
+       * doesn't do anything; derived
+ * classes will have to
+ * initialize the data.
*/
Vector ();
- /**
- * One of the constructors that
- * actually builds a vector. This
- * one requires prior knowledge
- * of the size of the vector and
- * a communicator from
- * Epetra_CommSerial or Epetra_CommMpi,
- * depending on whether we use a
- * serial or parallel MPI-based program.
- * This command distributes the
- * vector linearly among the processes,
- * from the beginning to the end,
- * so you might want to use some
- * more advanced mapping and the
- * constructor with argument
- * Epetra_Map.
- */
- Vector (unsigned int GlobalSize, Epetra_Comm &Comm);
/**
* This constructor takes an
- * Epetra_Map that already knows how
- * to distribute the individual
- * components among the MPI processors,
- * including the size of the vector.
+ * Epetra_Map that already
+ * knows how to distribute the
+ * individual components among
+ * the MPI processors,
+ * including the size of the
+ * vector.
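+       *
+       * A minimal usage sketch
+       * (the map layout shown
+       * here is purely
+       * illustrative and assumes
+       * an MPI build of
+       * Trilinos):
+       * @verbatim
+       * Epetra_MpiComm comm (MPI_COMM_WORLD);
+       * Epetra_Map map (100, 0, comm);
+       * Vector v (map);
+       * @endverbatim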
*/
Vector (const Epetra_Map &InputMap);
/**
- * Copy constructor. Sets the dimension
- * to that of the given vector and uses
- * the map of that vector, but
- * does not copy any element. Instead,
- * the memory will remain untouched
- * in case <tt>fast</tt> is false and
- * initialized with zero otherwise.
+ * Copy constructor. Sets the
+ * dimension to that of the
+ * given vector and uses the
+ * map of that vector, but does
+ * not copy any
+ * element. Instead, the memory
+ * will remain untouched in
+ * case <tt>fast</tt> is false
+ * and initialized with zero
+ * otherwise.
*/
Vector (const Vector &v,
const bool fast = false);
Vector & operator += (const Vector &V);
/**
- * Subtract the given vector from the
- * present one.
+ * Subtract the given vector
+ * from the present one.
*/
Vector & operator -= (const Vector &V);
/**
* Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
+ * components. Note that @p s
+ * is a scalar and not a
+ * vector.
*/
void add (const TrilinosScalar s);
/**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
+ * Simple vector addition,
+ * equal to the <tt>operator
+ * +=</tt>.
*/
void add (const Vector &V);
/**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this = a*V</tt>.
+ * Simple addition of a
+ * multiple of a vector,
+       * i.e. <tt>*this += a*V</tt>.
*/
void add (const TrilinosScalar a, const Vector &V);
/**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this = a*V + b*W</tt>.
+ * Multiple addition of scaled
+       * vectors, i.e. <tt>*this +=
+ * a*V + b*W</tt>.
*/
void add (const TrilinosScalar a, const Vector &V,
const TrilinosScalar b, const Vector &W);
/**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this) + V</tt>.
+ * Scaling and simple vector
+ * addition, i.e. <tt>*this =
+ * s*(*this) + V</tt>.
*/
void sadd (const TrilinosScalar s,
const Vector &V);
/**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this) + a*V</tt>.
+ * Scaling and simple addition,
+ * i.e. <tt>*this = s*(*this)
+ * + a*V</tt>.
*/
void sadd (const TrilinosScalar s,
const TrilinosScalar a,
const Vector &V);
/**
- * Scaling and multiple addition.
+ * Scaling and multiple
+ * addition.
*/
void sadd (const TrilinosScalar s,
const TrilinosScalar a,
const Vector &W);
/**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this) + a*V + b*W + c*X</tt>.
+ * Scaling and multiple
+ * addition. <tt>*this =
+ * s*(*this) + a*V + b*W +
+ * c*X</tt>.
*/
void sadd (const TrilinosScalar s,
const TrilinosScalar a,
/**
* Scale each element of this
* vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
+ * element in the
+ * argument. This function is
+ * mostly meant to simulate
+ * multiplication (and
+ * immediate re-assignment) by
+ * a diagonal scaling matrix.
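+       *
+       * In short, as a sketch
+       * with two vectors
+       * <tt>v</tt> and
+       * <tt>d</tt> that share
+       * the same map:
+       * @verbatim
+       * v.scale (d);   // v_i = v_i * d_i
+       * @endverbatim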
*/
void scale (const Vector &scaling_factors);
/**
- * Assignment <tt>*this = a*V</tt>.
+ * Assignment <tt>*this =
+ * a*V</tt>.
*/
void equ (const TrilinosScalar a, const Vector &V);
/**
- * Assignment <tt>*this = a*V + b*W</tt>.
+ * Assignment <tt>*this = a*V +
+ * b*W</tt>.
*/
void equ (const TrilinosScalar a, const Vector &V,
const TrilinosScalar b, const Vector &W);
/**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
+ * Compute the elementwise
+ * ratio of the two given
+ * vectors, that is let
+ * <tt>this[i] =
+ * a[i]/b[i]</tt>. This is
+ * useful for example if you
+ * want to compute the cellwise
+ * ratio of true to estimated
+ * error.
*
* This vector is appropriately
* scaled to hold the result.
*
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
+ * If any of the <tt>b[i]</tt>
+ * is zero, the result is
+ * undefined. No attempt is
+ * made to catch such
+ * situations.
*/
void ratio (const Vector &a,
const Vector &b);
/**
- * Output of vector in user-defined
- * format in analogy to the
+ * Output of vector in
+ * user-defined format in
+ * analogy to the
* dealii::Vector<number> class.
*/
void print (const char* format = 0) const;
/**
- * Print to a
- * stream. @p precision denotes
- * the desired precision with
- * which values shall be printed,
- * @p scientific whether
+ * Print to a stream. @p
+ * precision denotes the
+ * desired precision with which
+ * values shall be printed, @p
+ * scientific whether
* scientific notation shall be
- * used. If @p across is
- * @p true then the vector is
+ * used. If @p across is @p
+ * true then the vector is
* printed in a line, while if
* @p false then the elements
- * are printed on a separate line
- * each.
+ * are printed on a separate
+ * line each.
*/
void print (std::ostream &out,
const unsigned int precision = 3,
* vector and the other vector
* @p v. One could do this
* operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around. Note that the
- * vectors need to be of the same
- * size and base on the same
- * map.
+ * variable and copying over
+ * the data elements, but this
+ * function is significantly
+ * more efficient since it only
+ * swaps the pointers to the
+ * data of the two vectors and
+ * therefore does not need to
+ * allocate temporary storage
+ * and move data around. Note
+ * that the vectors need to be
+       * of the same size and be
+       * based on the same map.
*
* This function is analog to the
* the @p swap function of all C
private:
/**
- * An Epetra map used to map vector data
- * accross multiple processes. This is
- * the communicator and data distribution
- * object common to all
- * Trilinos objects used by deal.II.
+       * A pointer to a dummy
+       * Epetra_Map, used by the
+       * default constructor.
+ */
+ Epetra_Map *dummy_map;
+
+ /**
+ * A pointer to an Epetra map
+ * used to map vector data
+       * across multiple
+ * processes. This is the
+ * communicator and data
+ * distribution object common
+ * to all Trilinos objects used
+ * by deal.II.
*/
- Epetra_Map map;
+ Epetra_Map *map;
/**
- * Trilinos doesn't allow to mix additions
- * to matrix entries and overwriting
- * them (to make synchronisation of
+       * Trilinos doesn't allow one to
+       * mix additions to vector
+       * entries and overwriting them
+       * (to make synchronisation of
* parallel computations
- * simpler). The way we do it is to,
- * for each access operation, store
- * whether it is an insertion or an
- * addition. If the previous one was of
- * different type, then we first have
- * to flush the Trilinos buffers;
- * otherwise, we can simply go on.
- * Luckily, Trilinos has an object
- * for this which does already all
- * the parallel communications in
- * such a case, so we simply use their
- * model, which stores whether the
- * last operation was an addition
- * or an insertion.
+ * simpler). The way we do it
+ * is to, for each access
+ * operation, store whether it
+ * is an insertion or an
+ * addition. If the previous
+       * one was of a different type,
+ * then we first have to flush
+ * the Trilinos buffers;
+ * otherwise, we can simply go
+ * on. Luckily, Trilinos has
+ * an object for this which
+       * already does all the
+       * parallel communication in
+ * such a case, so we simply
+ * use their model, which
+ * stores whether the last
+ * operation was an addition or
+ * an insertion.
*/
Epetra_CombineMode last_action;
public:
/**
- * An Epetra distibuted vector type.
- * Requires an existing Epetra_Map for
- * storing data.
- * TODO: Should become private at some
- * point.
+       * An Epetra distributed vector
+ * type. Requires an existing
+ * Epetra_Map for storing data.
+ * TODO: Should become private
+ * at some point.
*/
std::auto_ptr<Epetra_FEVector> vector;
/**
- * Make the reference class a friend.
+ * Make the reference class a
+ * friend.
*/
friend class internal::VectorReference;
// ------------------- inline and template functions --------------
/**
- * Global function swap which overloads the default implementation
- * of the C standard library which uses a temporary object. The
- * function simply exchanges the data of the two vectors.
+ * Global function swap which overloads the default implementation of
+ * the C++ standard library which uses a temporary object. The function
+ * simply exchanges the data of the two vectors.
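+ *
+ * A short usage sketch, assuming two vectors built on the same
+ * Epetra_Map <tt>map</tt>:
+ * @verbatim
+ * TrilinosWrappers::Vector u (map), v (map);
+ * ...
+ * swap (u, v);   // exchanges the data of u and v without copying
+ * @endverbatim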
*
* @ingroup TrilinosWrappers
* @relates TrilinosWrappers::Vector
const VectorReference &
VectorReference::operator = (const VectorReference &r) const
{
- // as explained in the class
- // documentation, this is not the copy
- // operator. so simply pass on to the
- // "correct" assignment operator
+ // as explained in the class
+ // documentation, this is not the copy
+ // operator. so simply pass on to the
+ // "correct" assignment operator
*this = static_cast<TrilinosScalar> (r);
return *this;
{
// if the vectors have different sizes,
// then first resize the present one
- if (!map.SameAs(v.map))
+ if (! map->SameAs(*v.map))
{
map = v.map;
- vector = std::auto_ptr<Epetra_FEVector>
+ vector = std::auto_ptr<Epetra_FEVector>
(new Epetra_FEVector(*v.vector));
}
else
const unsigned int n_rows = matrix.m();
const unsigned int null_space_dimension = null_space.size();
- // Build the AMG preconditioner.
+ // Build the AMG preconditioner.
Teuchos::ParameterList parameter_list;
if (elliptic)
ExcDimensionMismatch(n_rows,
null_space[0].size()));
- // Reshape null space as a contiguous
- // vector of doubles so that Trilinos
- // can read from it.
+ // Reshape null space as a
+ // contiguous vector of
+ // doubles so that Trilinos
+ // can read from it.
null_space_modes.resize (n_rows * null_space_dimension, 0.);
for (unsigned int d=0; d<null_space_dimension; ++d)
for (unsigned int row=0; row<n_rows; ++row)
{
const unsigned int n_rows = deal_ii_sparse_matrix.m();
- // Init Epetra Matrix, avoid
- // storing the nonzero elements.
+ // Init Epetra Matrix, avoid
+ // storing the nonzero
+ // elements.
Map.reset (new Epetra_Map(n_rows, 0, communicator));
}
- // For the implementation of the
- // <code>vmult</code> function we
- // note that invoking a call of
- // the Trilinos preconditioner
- // requires us to use Epetra vectors
- // as well. It is faster
- // to provide a view, i.e., feed
- // Trilinos with a pointer to the
- // data, so we avoid copying the
- // content of the vectors during
- // the iteration. In the declaration
- // of the right hand side, we need
- // to cast the source vector (that
- // is <code>const</code> in all deal.II
- // calls) to non-constant value, as
- // this is the way Trilinos wants to
- // have them.
+ // For the implementation of
+ // the <code>vmult</code>
+ // function we note that
+ // invoking a call of the
+ // Trilinos preconditioner
+ // requires us to use Epetra
+ // vectors as well. It is
+ // faster to provide a view,
+ // i.e., feed Trilinos with a
+ // pointer to the data, so we
+ // avoid copying the content
+ // of the vectors during the
+ // iteration. In the
+ // declaration of the right
+ // hand side, we need to cast
+ // the source vector (that is
+ // <code>const</code> in all
+ // deal.II calls) to
+ // non-constant value, as this
+ // is the way Trilinos wants
+ // to have them.
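+ // As a rough sketch of this view
+ // mechanism (the operator name
+ // used here is only illustrative,
+ // not an actual member name), one
+ // could construct the Epetra
+ // vectors as
+ //   Epetra_Vector tril_dst (View, some_operator.OperatorRangeMap(),
+ //                           dst.begin());
+ //   Epetra_Vector tril_src (View, some_operator.OperatorDomainMap(),
+ //                           const_cast<double*>(src.begin()));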
void PreconditionAMG::vmult (dealii::Vector<double> &dst,
const dealii::Vector<double> &src) const
{
// would it point to?)
Assert (ncols != 0, ExcInternalError());
colnum_cache.reset (new std::vector<unsigned int> (colnums,
- colnums+ncols));
+ colnums+ncols));
value_cache.reset (new std::vector<TrilinosScalar> (values, values+ncols));
}
}
SparseMatrix::SparseMatrix ()
:
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- row_map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD)),
+ dummy_map (new Epetra_Map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD))),
#else
- row_map (0, 0, Epetra_SerialComm()),
+ dummy_map (new Epetra_Map (0, 0, Epetra_SerialComm())),
#endif
+ row_map (const_cast<Epetra_Map*>(dummy_map)),
col_map (row_map),
last_action (Insert),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, 0)))
+ (new Epetra_FECrsMatrix(Copy, *row_map, 0)))
{}
SparseMatrix::SparseMatrix (const Epetra_Map &InputMap,
const unsigned int n_max_entries_per_row)
:
- row_map (InputMap),
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ dummy_map (new Epetra_Map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD))),
+#else
+ dummy_map (new Epetra_Map (0, 0, Epetra_SerialComm())),
+#endif
+ row_map (const_cast<Epetra_Map*>(&InputMap)),
col_map (row_map),
last_action (Insert),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map,
+ (new Epetra_FECrsMatrix(Copy, *row_map,
int(n_max_entries_per_row), false)))
{}
- SparseMatrix::SparseMatrix (const Epetra_Map &InputMap,
+ SparseMatrix::SparseMatrix (const Epetra_Map &InputMap,
const std::vector<unsigned int> &n_entries_per_row)
:
- row_map (InputMap),
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ dummy_map (new Epetra_Map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD))),
+#else
+ dummy_map (new Epetra_Map (0, 0, Epetra_SerialComm())),
+#endif
+ row_map (const_cast<Epetra_Map*>(&InputMap)),
col_map (row_map),
last_action (Insert),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map,
+ (new Epetra_FECrsMatrix(Copy, *row_map,
(int*)const_cast<unsigned int*>(&(n_entries_per_row[0])),
true)))
{}
const Epetra_Map &InputColMap,
const unsigned int n_max_entries_per_row)
:
- row_map (InputRowMap),
- col_map (InputColMap),
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ dummy_map (new Epetra_Map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD))),
+#else
+ dummy_map (new Epetra_Map (0, 0, Epetra_SerialComm())),
+#endif
+ row_map (const_cast<Epetra_Map*>(&InputRowMap)),
+ col_map (const_cast<Epetra_Map*>(&InputColMap)),
last_action (Insert),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, col_map,
+ (new Epetra_FECrsMatrix(Copy, *row_map, *col_map,
int(n_max_entries_per_row), false)))
{}
- SparseMatrix::SparseMatrix (const Epetra_Map &InputRowMap,
- const Epetra_Map &InputColMap,
+ SparseMatrix::SparseMatrix (const Epetra_Map &InputRowMap,
+ const Epetra_Map &InputColMap,
const std::vector<unsigned int> &n_entries_per_row)
:
- row_map (InputRowMap),
- col_map (InputColMap),
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ dummy_map (new Epetra_Map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD))),
+#else
+ dummy_map (new Epetra_Map (0, 0, Epetra_SerialComm())),
+#endif
+ row_map (const_cast<Epetra_Map*>(&InputRowMap)),
+ col_map (const_cast<Epetra_Map*>(&InputColMap)),
last_action (Insert),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, col_map,
+ (new Epetra_FECrsMatrix(Copy, *row_map, *col_map,
(int*)const_cast<unsigned int*>(&(n_entries_per_row[0])),
true)))
{}
SparseMatrix::~SparseMatrix ()
{
+ delete dummy_map;
}
// sparsity pattern on that
// processor, and only broadcast the
// pattern afterwards.
- if (row_map.Comm().MyPID() == 0)
+ if (row_map->Comm().MyPID() == 0)
{
Assert (matrix->NumGlobalRows() == (int)sparsity_pattern.n_rows(),
ExcDimensionMismatch (matrix->NumGlobalRows(),
// column map. Maybe find something
// more out about this...
//reinit (input_map, input_map, sparsity_pattern);
- matrix.reset();
+
+ matrix.reset();
unsigned int n_rows = sparsity_pattern.n_rows();
- if (row_map.Comm().MyPID() == 0)
+ if (row_map->Comm().MyPID() == 0)
{
Assert (input_map.NumGlobalElements() == (int)sparsity_pattern.n_rows(),
ExcDimensionMismatch (input_map.NumGlobalElements(),
sparsity_pattern.n_cols()));
}
- row_map = input_map;
- col_map = input_map;
+ row_map = const_cast<Epetra_Map*>(&input_map);
+ col_map = row_map;
std::vector<int> n_entries_per_row(n_rows);
n_entries_per_row[(int)row] = sparsity_pattern.row_length(row);
matrix = std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, &n_entries_per_row[0],
+ (new Epetra_FECrsMatrix(Copy, *row_map, &n_entries_per_row[0],
false));
reinit (sparsity_pattern);
unsigned int n_rows = sparsity_pattern.n_rows();
- if (row_map.Comm().MyPID() == 0)
+ if (input_row_map.Comm().MyPID() == 0)
{
Assert (input_row_map.NumGlobalElements() == (int)sparsity_pattern.n_rows(),
ExcDimensionMismatch (input_row_map.NumGlobalElements(),
sparsity_pattern.n_cols()));
}
- row_map = input_row_map;
- col_map = input_col_map;
+ row_map = const_cast<Epetra_Map*>(&input_row_map);
+ col_map = const_cast<Epetra_Map*>(&input_col_map);
std::vector<int> n_entries_per_row(n_rows);
n_entries_per_row[(int)row] = sparsity_pattern.row_length(row);
matrix = std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, col_map,
+ (new Epetra_FECrsMatrix(Copy, *row_map, *col_map,
&n_entries_per_row[0], false));
reinit (sparsity_pattern);
ExcDimensionMismatch (input_col_map.NumGlobalElements(),
dealii_sparse_matrix.n()));
- row_map = input_row_map;
- col_map = input_col_map;
+ row_map = const_cast<Epetra_Map*>(&input_row_map);
+ col_map = const_cast<Epetra_Map*>(&input_col_map);
std::vector<int> n_entries_per_row(n_rows);
dealii_sparse_matrix.get_sparsity_pattern().row_length(row);
matrix = std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, col_map,
+ (new Epetra_FECrsMatrix(Copy, *row_map, *col_map,
&n_entries_per_row[0], true));
std::vector<double> values;
// the pointer and generate an
// empty matrix.
matrix.reset();
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- row_map = Epetra_Map (0,0,Epetra_MpiComm(MPI_COMM_WORLD)),
-#else
- row_map = Epetra_Map (0,0,Epetra_SerialComm()),
-#endif
+
+ row_map = const_cast<Epetra_Map*>(dummy_map);
+ col_map = row_map;
matrix = std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, 0));
+ (new Epetra_FECrsMatrix(Copy, *row_map, 0));
}
{
// flush buffers
int ierr;
- if (row_map.SameAs(col_map))
+ if (row_map->SameAs(*col_map))
ierr = matrix->GlobalAssemble ();
else
- ierr = matrix->GlobalAssemble (col_map, row_map);
+ ierr = matrix->GlobalAssemble (*col_map, *row_map);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
if (last_action == Add)
{
int ierr;
- if (row_map.SameAs(col_map))
+ if (row_map->SameAs(*col_map))
ierr = matrix->GlobalAssemble (false);
else
- ierr = matrix->GlobalAssemble(col_map, row_map, false);
+ ierr = matrix->GlobalAssemble(*col_map, *row_map, false);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
last_action = Insert;
if (last_action == Insert)
{
int ierr;
- if (row_map.SameAs(col_map))
+ if (row_map->SameAs(*col_map))
ierr = matrix->GlobalAssemble (false);
else
- ierr = matrix->GlobalAssemble(col_map, row_map, false);
+ ierr = matrix->GlobalAssemble(*col_map, *row_map, false);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
{
Assert (m() == n(), ExcNotQuadratic());
+ // Trilinos doesn't seem to have a
+ // more efficient way to access the
+ // diagonal than by just using the
+ // standard el(i,j) function.
return el(i,i);
}
{
Assert (row < m(), ExcInternalError());
- // get a representation of the present
- // row
+ // get a representation of the
+ // present row
int ncols = -1;
int local_row = matrix->RowMap().LID(row);
- // on the processor who owns this
- // row, we'll have a non-negative
- // value.
+ // on the processor who owns this
+ // row, we'll have a non-negative
+ // value.
if (local_row >= 0)
{
int ierr = matrix->NumMyRowEntries (local_row, ncols);
{
Assert (&src != &dst, ExcSourceEqualsDestination());
- Assert (col_map.SameAs(src.vector->Map()),
+ Assert (col_map->SameAs(src.vector->Map()),
ExcMessage ("Column map of matrix does not fit with vector map!"));
- Assert (row_map.SameAs(dst.vector->Map()),
+ Assert (row_map->SameAs(dst.vector->Map()),
ExcMessage ("Row map of matrix does not fit with vector map!"));
if (!matrix->Filled())
{
Assert (&src != &dst, ExcSourceEqualsDestination());
- Assert (row_map.SameAs(src.vector->Map()),
+ Assert (row_map->SameAs(src.vector->Map()),
ExcMessage ("Row map of matrix does not fit with vector map!"));
- Assert (col_map.SameAs(dst.vector->Map()),
+ Assert (col_map->SameAs(dst.vector->Map()),
ExcMessage ("Column map of matrix does not fit with vector map!"));
if (!matrix->Filled())
Assert (rhs.m() == m(), ExcDimensionMismatch (rhs.m(), m()));
Assert (rhs.n() == n(), ExcDimensionMismatch (rhs.n(), n()));
- // I bet that there must be a better way
- // to do this but it has not been found:
- // currently, we simply go through each
- // row of the argument matrix, copy it,
- // scale it, and add it to the current
- // matrix. that's probably not the most
- // efficient way to do things.
+ // I bet that there must be a
+ // better way to do this but it
+ // has not been found: currently,
+ // we simply go through each row
+ // of the argument matrix, copy
+ // it, scale it, and add it to
+ // the current matrix. that's
+ // probably not the most
+ // efficient way to do things.
const std::pair<unsigned int, unsigned int>
local_range = rhs.local_range();
- // TODO: Currently this only flips
- // a flag that tells Trilinos that
- // any application should be done with
- // the transpose. However, the matrix
- // structure is not reset.
+ // TODO: Currently this only flips a
+ // flag that tells Trilinos that any
+ // application should be done with
+ // the transpose. However, the
+ // matrix structure is not
+ // reset. Can we leave it like this?
void
SparseMatrix::transpose ()
{
// As of now, no particularly neat
- // ouput is generated in case of
+       // output is generated in case of
// multiple processors.
void SparseMatrix::print (std::ostream &out) const
{
Assert (index < vector.size(),
ExcIndexRange (index, 0, vector.size()));
- // Trilinos allows for vectors to be
- // referenced by the [] or () operators
- // but only () checks index bounds
- // Also, can only get local values
+ // Trilinos allows for vectors
+ // to be referenced by the []
+ // or () operators but only ()
+       // checks index bounds. Also,
+       // one can only get local values.
- AssertThrow ((static_cast<signed int>(index) >= vector.map.MinMyGID()) &&
- (static_cast<signed int>(index) <= vector.map.MaxMyGID()),
- ExcAccessToNonLocalElement (index, vector.map.MinMyGID(),
- vector.map.MaxMyGID()-1));
+ AssertThrow ((static_cast<signed int>(index) >= vector.map->MinMyGID()) &&
+ (static_cast<signed int>(index) <= vector.map->MaxMyGID()),
+ ExcAccessToNonLocalElement (index, vector.map->MinMyGID(),
+ vector.map->MaxMyGID()-1));
return (*(vector.vector))[0][index];
}
}
+
+
Vector::Vector ()
:
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- map (0,0,Epetra_MpiComm(MPI_COMM_WORLD)),
+ dummy_map (new Epetra_Map (0,0,Epetra_MpiComm(MPI_COMM_WORLD))),
#else
- map (0,0,Epetra_SerialComm()),
+ dummy_map (new Epetra_Map (0,0,Epetra_SerialComm())),
#endif
+ map (const_cast<Epetra_Map*>(dummy_map)),
last_action (Insert),
vector(std::auto_ptr<Epetra_FEVector>
- (new Epetra_FEVector(map)))
+ (new Epetra_FEVector(*map)))
{}
- Vector::Vector (unsigned int GlobalSize, Epetra_Comm &Comm)
- :
- map (GlobalSize, 0, Comm),
- last_action (Insert),
- vector (std::auto_ptr<Epetra_FEVector>
- (new Epetra_FEVector(map)))
- {}
Vector::Vector (const Epetra_Map &InputMap)
:
- map (InputMap),
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ dummy_map (new Epetra_Map (0,0,Epetra_MpiComm(MPI_COMM_WORLD))),
+#else
+ dummy_map (new Epetra_Map (0,0,Epetra_SerialComm())),
+#endif
+ map (const_cast<Epetra_Map*>(&InputMap)),
last_action (Insert),
vector (std::auto_ptr<Epetra_FEVector>
- (new Epetra_FEVector(map)))
+ (new Epetra_FEVector(*map)))
{}
+
Vector::Vector (const Vector &v,
const bool fast)
:
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ dummy_map (new Epetra_Map (0,0,Epetra_MpiComm(MPI_COMM_WORLD))),
+#else
+ dummy_map (new Epetra_Map (0,0,Epetra_SerialComm())),
+#endif
map (v.map),
last_action (Insert),
vector(std::auto_ptr<Epetra_FEVector>
- (new Epetra_FEVector(v.map,!fast)))
+ (new Epetra_FEVector(*map,!fast)))
{}
Vector::~Vector ()
- {}
+ {
+ vector.reset();
+ delete dummy_map;
+ }
Vector::reinit (const Epetra_Map &input_map)
{
vector.reset();
- map = input_map;
+ map = const_cast<Epetra_Map*> (&input_map);
- vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(input_map));
+ vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(*map));
last_action = Insert;
}
{
vector.reset();
- if (!map.SameAs(v.map))
- map = v.map;
+ map = v.map;
- vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(v.map,!fast));
+ vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(*map,!fast));
last_action = Insert;
}
void
Vector::clear ()
{
- // When we clear the matrix,
- // reset the pointer and
- // generate an empty matrix.
+ // When we clear the vector,
+ // reset the pointer and generate
+ // an empty vector.
vector.reset();
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- map = Epetra_Map (0,0,Epetra_MpiComm(MPI_COMM_WORLD)),
-#else
- map = Epetra_Map (0,0,Epetra_SerialComm()),
-#endif
+ map = dummy_map;
- vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
+ vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(*map));
last_action = Insert;
}
TrilinosScalar
Vector::el (const unsigned int index) const
{
- // Extract local indices in
- // the vector.
- int trilinos_i = map.LID(index);
+ // Extract local indices in
+ // the vector.
+ int trilinos_i = map->LID(index);
TrilinosScalar value = 0.;
if (trilinos_i == -1 )
{
Vector::real_type
Vector::lp_norm (const TrilinosScalar p) const
{
- // get a representation of the vector and
- // loop over all the elements
+ // get a representation of the
+ // vector and loop over all
+ // the elements
TrilinosScalar *start_ptr;
int leading_dimension;
int ierr = vector->ExtractView (&start_ptr, &leading_dimension);
TrilinosScalar sum=0;
const TrilinosScalar * ptr = start_ptr;
+
// add up elements
while (ptr != start_ptr+size())
sum += std::pow(std::fabs(*ptr++), p);
ExcMessage("The given value is not finite but "
"either infinite or Not A Number (NaN)"));
- // Update member can only input
- // two other vectors so
- // do it in two steps
+ // Update member can only
+ // input two other vectors so
+ // do it in two steps
const int ierr = vector->Update(a, *(v.vector), b, *(w.vector), s);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- // TODO: up to now only local data
- // printed out! Find a way to neatly
- // output distributed data...
+ // TODO: up to now only local
+ // data printed out! Find a
+ // way to neatly output
+ // distributed data...
void
Vector::print (const char *format) const
{
{
AssertThrow (out, ExcIO());
- // get a representation of the vector and
- // loop over all the elements
- // TODO: up to now only local data
- // printed out! Find a way to neatly
- // output distributed data...
+ // get a representation of the
+ // vector and loop over all
+    // the elements. TODO: up to
+ // now only local data printed
+ // out! Find a way to neatly
+ // output distributed data...
TrilinosScalar *val;
int leading_dimension;
int ierr = vector->ExtractView (&val, &leading_dimension);
out << static_cast<double>(val[i]) << std::endl;
out << std::endl;
- // restore the representation of the
- // vector
+ // restore the representation
+ // of the vector
AssertThrow (out, ExcIO());
}
void
Vector::swap (Vector &v)
{
- // Just swap the pointers to the
- // two Epetra vectors that hold all
- // the data.
+ // Just swap the pointers to
+ // the two Epetra vectors that
+ // hold all the data.
Vector *p_v = &v, *p_this = this;
Vector* tmp = p_v;
p_v = p_this;
}
+
unsigned int
Vector::memory_consumption () const
{