* This class implements a wrapper to use the Trilinos distributed sparse matrix
* class Epetra_FECrsMatrix. This is precisely the kind of matrix we deal with
* all the time - we most likely get it from some assembly process, where also
- * entries not locally owned might need to written and hence need to be
- * forwarded to the owner. This class is designed to be used in a distributed
+ * entries not locally owned might need to be written and hence need to be
+ * forwarded to the owner process.
+ * This class is designed to be used in a distributed
* memory architecture with an MPI compiler underneath, but works equally
* well for serial processes. The only requirement for this class to
- * work is that Trilinos is installed with the respective compiler as a
- * basis.
+ * work is that Trilinos has been installed with the same compiler as is used
+ * for generating deal.II.
*
* The interface of this class is modeled after the existing
* SparseMatrix class in deal.II. It has almost the same member
* functions, and is often interchangeable. However, since Trilinos only supports a
- * single scalar type (double), it is
- * not templated, and only works with doubles.
+ * single scalar type (double), it is not templated, and only works with
+ * doubles.
*
* Note that Trilinos only guarantees that operations do what you expect if the
- * functions @p GlobalAssemble has been called
- * after matrix assembly. Therefore, you need to call
+ * function @p GlobalAssemble has been called after matrix assembly.
+ * Therefore, you need to call
* SparseMatrix::compress() before you actually use the matrix. This also
* calls @p FillComplete that compresses the storage format for sparse
* matrices by discarding unused elements. Trilinos allows one to continue with
/**
* Constructor using an Epetra_Map
* and a maximum number of nonzero
- * matrix entries.
+ * matrix entries. Note that this
+ * number does not need to be exact,
+ * and it is even allowed that
+ * the actual matrix structure
+ * has more nonzero entries than
+ * specified in the constructor.
+ * However, it is still advantageous to
+ * provide good estimates here since
+ * this will considerably improve the
+ * performance when building the matrix.
*/
SparseMatrix (const Epetra_Map &InputMap,
const unsigned int n_max_entries_per_row);
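/**
 * A minimal usage sketch for this constructor (illustrative only, not
 * part of the class interface; <tt>n_dofs</tt> and <tt>max_entries</tt>
 * are placeholder values the user has to supply):
 * @code
 * Epetra_MpiComm communicator (MPI_COMM_WORLD);
 * Epetra_Map     map (static_cast<int>(n_dofs), 0, communicator);
 *
 * TrilinosWrappers::SparseMatrix matrix (map, max_entries);
 * // ... fill the matrix, e.g. through set() and add() during assembly ...
 * matrix.compress ();   // collective call, required before using the matrix
 * @endcode
 */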
* can use pointers to this class.
*/
virtual ~SparseMatrix ();
-
- /**
- * This function initializes the
- * Trilinos matrix by attaching all
- * the elements to the sparsity
- * pattern provided as deal argument.
- * This function uses a user-
- * provided maximum number of
- * elements per row. If that is
- * not directly available, use one of the
- * other reinit functions.
- */
- void reinit (const SparsityPattern &sparsity_pattern,
- const unsigned int n_max_entries_per_row);
/**
* This function initializes the
- * Trilinos matrix by attaching all
- * the elements to the sparsity
- * pattern provided as deal argument,
- * now calculating the maximum number
- * of nonzeros from the sparsity
- * pattern internally.
+ * Trilinos matrix with a deal.II
+ * sparsity pattern, i.e. it
+ * makes the Trilinos Epetra
+ * matrix know the position of
+ * nonzero entries according to
+ * the sparsity pattern. Note that,
+ * when using this function, the
+ * matrix must already be initialized
+ * with a suitable Epetra_Map that
+ * describes the distribution of
+ * the matrix among the MPI
+ * processes. Otherwise, an
+ * error will be thrown.
*/
void reinit (const SparsityPattern &sparsity_pattern);
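/**
 * A sketch of the intended call sequence (illustrative only;
 * <tt>n_dofs</tt> and <tt>max_per_row</tt> are placeholders):
 * @code
 * Epetra_Map map (static_cast<int>(n_dofs), 0,
 *                 Epetra_MpiComm(MPI_COMM_WORLD));
 * TrilinosWrappers::SparseMatrix matrix (map, max_per_row);
 *
 * SparsityPattern sparsity (n_dofs, n_dofs, max_per_row);
 * // ... fill the pattern, e.g. with DoFTools::make_sparsity_pattern ...
 * sparsity.compress ();
 *
 * matrix.reinit (sparsity);
 * matrix.compress ();
 * @endcode
 */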
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries stored
- * therein. It uses a threshold
- * to copy only elements whose
- * modulus is larger than the
- * threshold (so zeros in the
- * deal.II matrix can be filtered
- * away).
- */
- void reinit (const Epetra_Map &input_map,
- const ::dealii::SparseMatrix<double> &deal_ii_sparse_matrix,
- const double drop_tolerance=1e-13);
-
/**
- * This function is similar to the
- * other initialization function above,
- * but now also reassigns the matrix
- * rows according to a user-supplied
- * Epetra map. This might be used
+ * This function initializes the
+ * Trilinos Epetra matrix according
+ * to the specified sparsity_pattern,
+ * and also reassigns the matrix
+ * rows to different processes
+ * according to a user-supplied
+ * Epetra map. This might be useful
* when the matrix structure changes,
* e.g. when the grid is refined.
*/
* but now also reassigns the matrix
* rows and columns according to
* two user-supplied Epetra maps.
- * To be used e.g. for rectangular
- * matrices after remeshing.
+ * To be used for rectangular
+ * matrices.
*/
void reinit (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
const SparsityPattern &sparsity_pattern);
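/**
 * A possible use for a rectangular coupling matrix (sketch only;
 * <tt>n_rows</tt>, <tt>n_cols</tt> and <tt>sparsity_pattern</tt> are
 * placeholders, the latter being an n_rows by n_cols pattern):
 * @code
 * Epetra_MpiComm comm (MPI_COMM_WORLD);
 * Epetra_Map row_map (static_cast<int>(n_rows), 0, comm);
 * Epetra_Map col_map (static_cast<int>(n_cols), 0, comm);
 *
 * TrilinosWrappers::SparseMatrix B;
 * B.reinit (row_map, col_map, sparsity_pattern);
 * @endcode
 */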
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries stored
+ * therein. It uses a threshold
+ * to copy only elements with
+ * modulus larger than the
+ * threshold (so zeros in the
+ * deal.II matrix can be filtered
+ * away).
+ */
+ void reinit (const Epetra_Map &input_map,
+ const ::dealii::SparseMatrix<double> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13);
+
+ /**
+ * This function is similar to the
+ * initialization function above that
+ * takes a deal.II sparse matrix as input,
+ * but now takes Epetra maps for both
+ * the rows and the columns of the
+ * matrix.
+ * To be used for rectangular
+ * matrices.
+ */
+ void reinit (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
+ const ::dealii::SparseMatrix<double> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13);
+
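/**
 * Conversion sketch (illustrative only; <tt>deal_matrix</tt> stands
 * for an already assembled dealii::SparseMatrix<double>):
 * @code
 * Epetra_Map map (static_cast<int>(deal_matrix.m()), 0,
 *                 Epetra_MpiComm(MPI_COMM_WORLD));
 *
 * TrilinosWrappers::SparseMatrix trilinos_matrix;
 * trilinos_matrix.reinit (map, deal_matrix, 1e-13);
 * trilinos_matrix.compress ();
 * @endcode
 */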
/**
* Release all memory and return
* to a state just like after
SparseMatrix::const_iterator::Accessor::
visit_present_row ()
{
- // if we are asked to visit the
- // past-the-end line, then simply
- // release all our caches and go on
- // with life
+ // if we are asked to visit the
+ // past-the-end line, then simply
+ // release all our caches and go on
+ // with life
if (this->a_row == matrix->m())
- {
- colnum_cache.reset ();
- value_cache.reset ();
+ {
+ colnum_cache.reset ();
+ value_cache.reset ();
- return;
- }
+ return;
+ }
- // otherwise first flush Trilinos caches
+ // otherwise first flush Trilinos caches
matrix->compress ();
- // get a representation of the present
- // row
+ // get a representation of the present
+ // row
int ncols;
const int max_row_length = matrix->n();
// extract both the values and the column indices of the present row
std::vector<TrilinosScalar> value_buffer (max_row_length);
std::vector<int>            colnum_buffer (max_row_length);
const int ierr = matrix->ExtractGlobalRowCopy((int)this->a_row, max_row_length,
                                              ncols, &value_buffer[0], &colnum_buffer[0]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- // copy it into our caches if the line
- // isn't empty. if it is, then we've
- // done something wrong, since we
- // shouldn't have initialized an
- // iterator for an empty line (what
- // would it point to?)
+ // copy it into our caches if the line
+ // isn't empty. if it is, then we've
+ // done something wrong, since we
+ // shouldn't have initialized an
+ // iterator for an empty line (what
+ // would it point to?)
Assert (ncols != 0, ExcInternalError());
colnum_cache.reset (new std::vector<unsigned int> (colnum_buffer.begin(),
                                                   colnum_buffer.begin()+ncols));
value_cache.reset (new std::vector<TrilinosScalar> (value_buffer.begin(),
                                                    value_buffer.begin()+ncols));
}
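// The caches filled above are what the const_iterator interface hands
// back to the user. An illustrative traversal of all stored entries
// (a sketch only, assuming the iterator follows the usual deal.II
// accessor conventions with row(), column() and value()):
//
//   for (SparseMatrix::const_iterator p = matrix.begin();
//        p != matrix.end(); ++p)
//     std::cout << p->row() << ' ' << p->column() << ' '
//               << p->value() << std::endl;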
}
// The constructor is actually the
// only point where we have to check
- // whether we build a serial or
- // a parallel Trilinos matrix.
- // In the end, it does not even
- // matter how many threads there
- // are, but only if we use an
- // MPI compiler or a standard
+ // whether we build a serial or
+ // a parallel Trilinos matrix.
+ // In the end, it does not even
+ // matter how many threads there
+ // are, but only if we use an
+ // MPI compiler or a standard
// compiler. So, one thread on
- // an MPI compiler will still get
- // a parallel interface.
+ // an MPI compiler will still get
+ // a parallel interface.
SparseMatrix::SparseMatrix ()
- :
+ :
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
row_map (0, 0, Epetra_MpiComm(MPI_COMM_WORLD)),
#else
#endif
col_map (row_map),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, 0))),
- last_action (Insert)
+ (new Epetra_FECrsMatrix(Copy, row_map, 0))),
+ last_action (Insert)
{}
SparseMatrix::SparseMatrix (const Epetra_Map &InputMap,
const unsigned int n_max_entries_per_row)
- :
+ :
row_map (InputMap),
col_map (row_map),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map,
+ (new Epetra_FECrsMatrix(Copy, row_map,
int(n_max_entries_per_row), false))),
- last_action (Insert)
+ last_action (Insert)
{}
SparseMatrix::SparseMatrix (const Epetra_Map &InputMap,
const std::vector<unsigned int> &n_entries_per_row)
- :
+ :
row_map (InputMap),
col_map (row_map),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
(new Epetra_FECrsMatrix(Copy, row_map,
(int*)const_cast<unsigned int*>(&(n_entries_per_row[0])),
- false))),
- last_action (Insert)
+ true))),
+ last_action (Insert)
{}
SparseMatrix::SparseMatrix (const Epetra_Map &InputRowMap,
const Epetra_Map &InputColMap,
const unsigned int n_max_entries_per_row)
- :
+ :
row_map (InputRowMap),
col_map (InputColMap),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
- (new Epetra_FECrsMatrix(Copy, row_map, col_map,
+ (new Epetra_FECrsMatrix(Copy, row_map, col_map,
int(n_max_entries_per_row), false))),
- last_action (Insert)
+ last_action (Insert)
{}
SparseMatrix::SparseMatrix (const Epetra_Map &InputRowMap,
const Epetra_Map &InputColMap,
const std::vector<unsigned int> &n_entries_per_row)
- :
+ :
row_map (InputRowMap),
col_map (InputColMap),
matrix (std::auto_ptr<Epetra_FECrsMatrix>
(new Epetra_FECrsMatrix(Copy, row_map, col_map,
(int*)const_cast<unsigned int*>(&(n_entries_per_row[0])),
- false))),
- last_action (Insert)
+ true))),
+ last_action (Insert)
{}
}
-
- void
- SparseMatrix::reinit (const SparsityPattern &sparsity_pattern,
- const unsigned int n_max_entries_per_row)
- {
-
- unsigned int n_rows = sparsity_pattern.n_rows();
-
- Assert (matrix->NumGlobalRows() == (int)sparsity_pattern.n_rows(),
- ExcDimensionMismatch (matrix->NumGlobalRows(),
- sparsity_pattern.n_rows()));
-
- std::vector<double> values(n_max_entries_per_row, 0.);
- std::vector<int> row_indices(n_max_entries_per_row);
-
- for (unsigned int row=0; row<n_rows; ++row)
- {
- const int row_length = sparsity_pattern.row_length(row);
- row_indices.resize (row_length, 0);
- values.resize (row_length, 0.);
-
- for (int col=0; col< row_length; ++col)
- row_indices[col] = sparsity_pattern.column_number (row, col);
-
- matrix->InsertGlobalValues(row, row_length,
- &values[0], &row_indices[0]);
- }
-
- // In the end, the matrix needs to
- // be compressed in order to be
- // really ready. However, that is
- // a collective operation, so it
- // has to be called on all processes
- // by the user, whereas this function
- // should only be used on one processor
- // since our sparsity pattern data
- // types are all serial.
- }
-
-
void
SparseMatrix::reinit (const SparsityPattern &sparsity_pattern)
// so do not check for consistent
// column numbers here.
//
+ //
// this bug is filed in the Sandia
// bugzilla under #4123 and should be
// fixed for version 9.0
for (unsigned int row=0; row<n_rows; ++row)
n_entries_per_row[(int)row] = sparsity_pattern.row_length(row);
- const unsigned int n_max_entries_per_row = *std::max_element (
- &n_entries_per_row[0], &n_entries_per_row[n_rows-1]);
+ std::vector<double> values;
+ std::vector<int> row_indices;
+
+ for (unsigned int row=0; row<n_rows; ++row)
+ {
+ const int row_length = sparsity_pattern.row_length(row);
+ row_indices.resize (row_length, 0);
+ values.resize (row_length, 0.);
+
+ for (int col=0; col < row_length; ++col)
+ row_indices[col] = sparsity_pattern.column_number (row, col);
+
+ matrix->InsertGlobalValues(row, row_length,
+ &values[0], &row_indices[0]);
+ }
- reinit (sparsity_pattern, n_max_entries_per_row);
+ // In the end, the matrix needs to
+ // be compressed in order to be
+ // really ready. However, that is
+ // a collective operation, so it
+ // has to be called on all processes
+ // by the user, whereas this function
+ // should only be used on one processor
+ // since our sparsity pattern data
+ // types are all serial as of now.
}
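// A sketch of the resulting division of labor (illustrative only):
//
//   matrix.reinit (sparsity_pattern);   // per-process setup of the structure
//   matrix.compress ();                 // collective, called on all processes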
void
SparseMatrix::reinit (const Epetra_Map &input_map,
- const SparsityPattern &sparsity_pattern)
+ const SparsityPattern &sparsity_pattern)
{
- matrix.reset();
-
- unsigned int n_rows = sparsity_pattern.n_rows();
-
- Assert (input_map.NumGlobalElements() == (int)sparsity_pattern.n_rows(),
- ExcDimensionMismatch (input_map.NumGlobalElements(),
- sparsity_pattern.n_rows()));
- Assert (input_map.NumGlobalElements() == (int)sparsity_pattern.n_cols(),
- ExcDimensionMismatch (input_map.NumGlobalElements(),
- sparsity_pattern.n_cols()));
-
- row_map = input_map;
- col_map = row_map;
-
- std::vector<int> n_entries_per_row(n_rows);
-
- for (unsigned int row=0; row<n_rows; ++row)
- n_entries_per_row[(int)row] = sparsity_pattern.row_length(row);
-
- matrix = std::auto_ptr<Epetra_FECrsMatrix> (new Epetra_FECrsMatrix (
- Copy, row_map, &n_entries_per_row[0], true));
-
- const unsigned int n_max_entries_per_row = *std::max_element (
- &n_entries_per_row[0], &n_entries_per_row[n_rows-1]);
-
- reinit (sparsity_pattern, n_max_entries_per_row);
+ reinit (input_map, input_map, sparsity_pattern);
}
void
SparseMatrix::reinit (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const SparsityPattern &sparsity_pattern)
+ const SparsityPattern &sparsity_pattern)
{
matrix.reset();
matrix = std::auto_ptr<Epetra_FECrsMatrix>
(new Epetra_FECrsMatrix(Copy, row_map, col_map,
- &n_entries_per_row[0], true));
+ &n_entries_per_row[0], true));
- const unsigned int n_max_entries_per_row = *std::max_element (
- &n_entries_per_row[0], &n_entries_per_row[n_rows-1]);
-
- reinit (sparsity_pattern, n_max_entries_per_row);
+ reinit (sparsity_pattern);
}
void
SparseMatrix::reinit (const Epetra_Map &input_map,
- const ::dealii::SparseMatrix<double> &deal_ii_sparse_matrix,
- const double drop_tolerance)
+ const ::dealii::SparseMatrix<double> &dealii_sparse_matrix,
+ const double drop_tolerance)
+ {
+ reinit (input_map, input_map, dealii_sparse_matrix, drop_tolerance);
+ }
+
+
+
+ void
+ SparseMatrix::reinit (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
+ const ::dealii::SparseMatrix<double> &dealii_sparse_matrix,
+ const double drop_tolerance)
{
matrix.reset();
- unsigned int n_rows = deal_ii_sparse_matrix.m();
+ unsigned int n_rows = dealii_sparse_matrix.m();
- Assert (input_map.NumGlobalElements() == (int)n_rows,
- ExcDimensionMismatch (input_map.NumGlobalElements(),
+ Assert (input_row_map.NumGlobalElements() == (int)n_rows,
+ ExcDimensionMismatch (input_row_map.NumGlobalElements(),
n_rows));
- Assert (input_map.NumGlobalElements() == (int)deal_ii_sparse_matrix.n(),
- ExcDimensionMismatch (input_map.NumGlobalElements(),
- deal_ii_sparse_matrix.n()));
-
- row_map = input_map;
- col_map = row_map;
+ Assert (input_col_map.NumGlobalElements() == (int)dealii_sparse_matrix.n(),
+ ExcDimensionMismatch (input_col_map.NumGlobalElements(),
+ dealii_sparse_matrix.n()));
+
+ row_map = input_row_map;
+ col_map = input_col_map;
std::vector<int> n_entries_per_row(n_rows);
for (unsigned int row=0; row<n_rows; ++row)
n_entries_per_row[(int)row] =
- deal_ii_sparse_matrix.get_sparsity_pattern().row_length(row);
+ dealii_sparse_matrix.get_sparsity_pattern().row_length(row);
matrix = std::auto_ptr<Epetra_FECrsMatrix>
(new Epetra_FECrsMatrix(Copy, row_map, col_map,
- &n_entries_per_row[0], true));
-
+ &n_entries_per_row[0], true));
+
std::vector<double> values;
std::vector<int> row_indices;
unsigned int index = 0;
for (dealii::SparseMatrix<double>::const_iterator
- p = deal_ii_sparse_matrix.begin(row);
- p != deal_ii_sparse_matrix.end(row); ++p)
+ p = dealii_sparse_matrix.begin(row);
+ p != dealii_sparse_matrix.end(row); ++p)
if (std::fabs(p->value()) > drop_tolerance)
{
row_indices[index] = p->column();
const int n_row_entries = index;
matrix->InsertGlobalValues(row, n_row_entries,
- &values[0], &row_indices[0]);
+ &values[0], &row_indices[0]);
}
}
void
SparseMatrix::clear ()
{
- // When we clear the matrix,
- // reset the pointer and
- // generate an empty matrix.
+ // When we clear the matrix,
+ // reset the pointer and
+ // generate an empty matrix.
matrix.reset();
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
row_map = Epetra_Map (0,0,Epetra_MpiComm(MPI_COMM_WORLD)),
void
SparseMatrix::compress ()
{
- // flush buffers
+ // flush buffers
int ierr;
if (row_map.SameAs(col_map))
ierr = matrix->GlobalAssemble ();
Assert (numbers::is_finite(value),
ExcMessage("The given value is not finite but either "
- "infinite or Not A Number (NaN)"));
+ "infinite or Not A Number (NaN)"));
if (last_action == Add)
{
- int ierr;
+ int ierr;
if (row_map.SameAs(col_map))
ierr = matrix->GlobalAssemble (false);
else
ierr = matrix->GlobalAssemble(col_map, row_map, false);
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
last_action = Insert;
}
Assert (numbers::is_finite(value),
ExcMessage("The given value is not finite but either "
- "infinite or Not A Number (NaN)"));
+ "infinite or Not A Number (NaN)"));
if (last_action == Insert)
{
- int ierr;
+ int ierr;
if (row_map.SameAs(col_map))
ierr = matrix->GlobalAssemble (false);
else
ierr = matrix->GlobalAssemble(col_map, row_map, false);
-
+
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- last_action = Add;
+ last_action = Add;
}
- // we have to do above actions in any
- // case to be consistent with the MPI
- // communication model (see the
- // comments in the documentation of
- // TrilinosWrappers::Vector), but we
- // can save some work if the addend is
- // zero
+ // we have to do above actions in any
+ // case to be consistent with the MPI
+ // communication model (see the
+ // comments in the documentation of
+ // TrilinosWrappers::Vector), but we
+ // can save some work if the addend is
+ // zero
if (value == 0)
return;
Assert (matrix->Filled()==true,
ExcMessage("Matrix must be compressed before invoking clear_row."));
- // Only do this on the rows
- // owned locally on this processor.
+ // Only do this on the rows
+ // owned locally on this processor.
int local_row = matrix->LRID(row);
if (local_row >= 0)
{
values, col_indices);
Assert (ierr == 0,
- ExcTrilinosError(ierr));
+ ExcTrilinosError(ierr));
int* diag_find = std::find(col_indices,col_indices+num_entries,
- local_row);
+ local_row);
int diag_index = (int)(diag_find - col_indices);
for (int j=0; j<num_entries; ++j)
if (j != diag_index)
- values[j] = 0.;
+ values[j] = 0.;
if (diag_find != col_indices+num_entries && std::fabs(values[diag_index]) > 0.)
values[diag_index] = new_diag_value;
SparseMatrix::el (const unsigned int i,
const unsigned int j) const
{
- // Extract local indices in
- // the matrix.
+ // Extract local indices in
+ // the matrix.
int trilinos_i = matrix->LRID(i), trilinos_j = matrix->LRID(j);
TrilinosScalar value = 0.;
- // If the data is not on the
- // present processor, we can't
- // continue.
+ // If the data is not on the
+ // present processor, we can't
+ // continue.
if ((trilinos_i == -1 ) || (trilinos_j == -1))
{
- Assert (false, ExcAccessToNonLocalElement(i, j, local_range().first,
+ Assert (false, ExcAccessToNonLocalElement(i, j, local_range().first,
local_range().second));
}
else
// finally get it.
int* el_find = std::find(&col_indices[0],&col_indices[0] + nnz_present,
- trilinos_j);
+ trilinos_j);
int el_index = (int)(el_find - col_indices);
{
Assert (row < m(), ExcInternalError());
- // get a representation of the present
- // row
+ // get a representation of the present
+ // row
int ncols = -1;
int local_row = matrix->RowMap().LID(row);
- // on the processor who owns this
- // row, we'll have a non-negative
- // value.
+ // on the processor who owns this
+ // row, we'll have a non-negative
+ // value.
if (local_row >= 0)
{
int ierr = matrix->NumMyRowEntries (local_row, ncols);
void
SparseMatrix::vmult (Vector &dst,
- const Vector &src) const
+ const Vector &src) const
{
Assert (&src != &dst, ExcSourceEqualsDestination());
void
SparseMatrix::vmult_add (Vector &dst,
- const Vector &src) const
+ const Vector &src) const
{
Assert (&src != &dst, ExcSourceEqualsDestination());
TrilinosScalar
SparseMatrix::matrix_scalar_product (const Vector &u,
- const Vector &v) const
+ const Vector &v) const
{
Vector tmp(v.map);
vmult (tmp, v);
// TODO: Currently this only flips
- // a flag that tells Trilinos that
- // any application should be done with
- // the transpose. However, the matrix
- // structure is not reset.
+ // a flag that tells Trilinos that
+ // any application should be done with
+ // the transpose. However, the matrix
+ // structure is not reset.
void
SparseMatrix::transpose ()
{
if (!matrix->UseTranspose())
{
- ierr = matrix->SetUseTranspose (true);
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ ierr = matrix->SetUseTranspose (true);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
}
else
{
- ierr = matrix->SetUseTranspose (false);
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ ierr = matrix->SetUseTranspose (false);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
}
}
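// An illustrative consequence of the flag flipping described above
// (sketch only): after transpose() has been called, matrix-vector
// products act with the transposed operator until the flag is
// flipped back.
//
//   A.transpose ();
//   A.vmult (dst, src);   // now computes dst = A^T * src
//   A.transpose ();       // back to dst = A * src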
- // As of now, no particularly neat
- // ouput is generated in case of
- // multiple processors.
+ // As of now, no particularly neat
+ // output is generated in case of
+ // multiple processors.
void SparseMatrix::print (std::ostream &out) const
{
double * values;
for (int i=0; i<matrix->NumMyRows(); ++i)
{
- matrix->ExtractMyRowView (i, num_entries, values, indices);
+ matrix->ExtractMyRowView (i, num_entries, values, indices);
for (int j=0; j<num_entries; ++j)
out << "(" << i << "," << indices[matrix->GRID(j)] << ") "
<< values[j] << std::endl;