#include <deal.II/base/config.h>
#include <deal.II/base/smartpointer.h>
#include <deal.II/base/utilities.h>
+#include <deal.II/base/parallel.h>
#include <deal.II/base/template_constraints.h>
#include <deal.II/lac/tridiagonal_matrix.h>
#include <deal.II/lac/vector_memory.h>
template <typename number> class Vector;
template <typename number> class SparseMatrix;
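+// forward declaration of the parallel vector class; it is used below to
+// provide a specialized, fused vector-update kernel for distributed vectors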
+namespace parallel
+{
+ namespace distributed
+ {
+ template <typename number> class Vector;
+ }
+}
/*! @addtogroup Preconditioners
*@{
* row where the first position after
* the diagonal is located.
*/
- std::vector<unsigned int> pos_right_of_diagonal;
+ std::vector<std::size_t> pos_right_of_diagonal;
};
{
this->PreconditionRelaxation<MATRIX>::initialize (rA, parameters);
- // in case we have a SparseMatrix class,
- // we can extract information about the
- // diagonal.
+ // in case we have a SparseMatrix class, we can extract information about
+ // the diagonal.
const SparseMatrix<typename MATRIX::value_type> *mat =
dynamic_cast<const SparseMatrix<typename MATRIX::value_type> *>(&*this->A);
- // calculate the positions first after
- // the diagonal.
+ // calculate, for each row, the position of the first entry to the right
+ // of the diagonal.
if (mat != 0)
{
- const std::size_t *rowstart_ptr =
- mat->get_sparsity_pattern().get_rowstart_indices();
- const unsigned int *const colnums =
- mat->get_sparsity_pattern().get_column_numbers();
const unsigned int n = this->A->n();
- pos_right_of_diagonal.resize(n);
- for (unsigned int row=0; row<n; ++row, ++rowstart_ptr)
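+ // resize and fill with an invalid marker value; the loop below overwrites
+ // every entry with the actual position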
+ pos_right_of_diagonal.resize(n, static_cast<std::size_t>(-1));
+ for (unsigned int row=0; row<n; ++row)
{
- // find the first element in this line
- // which is on the right of the diagonal.
- // we need to precondition with the
- // elements on the left only.
- // note: the first entry in each
- // line denotes the diagonal element,
- // which we need not check.
- pos_right_of_diagonal[row] =
- Utilities::lower_bound (&colnums[*rowstart_ptr+1],
- &colnums[*(rowstart_ptr+1)],
- row)
- - colnums;
+ // find the first element in this line which is on the right of the
+ // diagonal. we need to precondition with the elements on the left
+ // only. note: the first entry in each line denotes the diagonal
+ // element, which we need not check.
+ typename SparseMatrix<typename MATRIX::value_type>::const_iterator
+ it = mat->begin(row)+1;
+ for ( ; it < mat->end(row); ++it)
+ if (it->column() > row)
+ break;
+ pos_right_of_diagonal[row] = it - mat->begin();
}
}
}
// need at least two iterations to have
// maximum and minimum eigenvalue
- if (it > data.eig_cg_n_iterations || (it > 2 &&
+ if (res == 0. ||
+ it > data.eig_cg_n_iterations || (it > 2 &&
res < data.eig_cg_residual))
break;
offdiagonal.push_back(std::sqrt(beta)/alpha);
}
- TridiagonalMatrix<double> T(diagonal.size(), true);
- for (unsigned int i=0; i<diagonal.size(); ++i)
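+ // if the CG iteration above produced no data (e.g. because the initial
+ // residual was already zero), there is no eigenvalue information; fall
+ // back to 1 for both estimates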
+ if (diagonal.size() == 0)
+ min_eigenvalue = max_eigenvalue = 1.;
+ else
{
- T(i,i) = diagonal[i];
- if (i< diagonal.size()-1)
- T(i,i+1) = offdiagonal[i];
+ TridiagonalMatrix<double> T(diagonal.size(), true);
+ for (unsigned int i=0; i<diagonal.size(); ++i)
+ {
+ T(i,i) = diagonal[i];
+ if (i< diagonal.size()-1)
+ T(i,i+1) = offdiagonal[i];
+ }
+ T.compute_eigenvalues();
+ min_eigenvalue = T.eigenvalue(0);
+ if (diagonal.size() > 1)
+ max_eigenvalue = T.eigenvalue(T.n()-1);
+ else
+ max_eigenvalue = min_eigenvalue;
}
- T.compute_eigenvalues();
- min_eigenvalue = T.eigenvalue(0);
- max_eigenvalue = T.eigenvalue(T.n()-1);
}
// include a safety factor since the CG
+namespace internal
+{
+ namespace PreconditionChebyshev
+ {
+ // for deal.II vectors, perform the updates of the Chebyshev
+ // preconditioner all at once to reduce memory transfers. Below, we select
+ // between general vectors and deal.II vectors, for which we write out the
+ // loop over the (local) size of the vector explicitly
+
+ // generic part for non-deal.II vectors
+ template <typename VECTOR>
+ inline
+ void
+ vector_updates (const VECTOR &src,
+ const VECTOR &matrix_diagonal_inverse,
+ const bool start_zero,
+ const double factor1,
+ const double factor2,
+ VECTOR &update1,
+ VECTOR &update2,
+ VECTOR &dst)
+ {
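+ // the update performed here is, with Dinv denoting the entries of
+ // matrix_diagonal_inverse:
+ //   start_zero : dst = factor2 * Dinv * src, update1 = -dst
+ //   otherwise  : update1 = factor1 * update1 + factor2 * Dinv * (update2 - src),
+ //                dst -= update1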
+ if (start_zero)
+ {
+ dst.equ (factor2, src);
+ dst.scale (matrix_diagonal_inverse);
+ update1.equ(-1.,dst);
+ }
+ else
+ {
+ update2 -= src;
+ update2.scale (matrix_diagonal_inverse);
+ if (factor1 == 0.)
+ update1.equ(factor2, update2);
+ else
+ update1.sadd(factor1, factor2, update2);
+ dst -= update1;
+ }
+ }
+
+ // worker loop for deal.II vectors
+ template <typename Number>
+ struct VectorUpdatesRange : public parallel::ParallelForInteger
+ {
+ VectorUpdatesRange (const std::size_t size,
+ const Number *src,
+ const Number *matrix_diagonal_inverse,
+ const bool start_zero,
+ const Number factor1,
+ const Number factor2,
+ Number *update1,
+ Number *update2,
+ Number *dst)
+ :
+ src (src),
+ matrix_diagonal_inverse (matrix_diagonal_inverse),
+ start_zero (start_zero),
+ factor1 (factor1),
+ factor2 (factor2),
+ update1 (update1),
+ update2 (update2),
+ dst (dst)
+ {
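+ // run the update directly for short vectors; otherwise let the base
+ // class parallel::ParallelForInteger distribute the work, using
+ // minimum_parallel_grain_size as the grain size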
+ if (size < internal::Vector::minimum_parallel_grain_size)
+ apply_to_subrange (0, size);
+ else
+ apply_parallel (0, size,
+ internal::Vector::minimum_parallel_grain_size);
+ }
+
+ ~VectorUpdatesRange()
+ {}
+
+ virtual void
+ apply_to_subrange (const std::size_t begin,
+ const std::size_t end) const
+ {
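+ // the special case factor1 == 0 avoids reading the old content of
+ // update1; it occurs in the initial step of the Chebyshev iteration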
+ if (factor1 == Number())
+ {
+ if (start_zero)
+ for (std::size_t i=begin; i<end; ++i)
+ {
+ dst[i] = factor2 * src[i] * matrix_diagonal_inverse[i];
+ update1[i] = -dst[i];
+ }
+ else
+ for (std::size_t i=begin; i<end; ++i)
+ {
+ update1[i] = ((update2[i]-src[i]) *
+ factor2*matrix_diagonal_inverse[i]);
+ dst[i] -= update1[i];
+ }
+ }
+ else
+ for (std::size_t i=begin; i<end; ++i)
+ {
+ const Number update2i = ((update2[i] - src[i]) *
+ matrix_diagonal_inverse[i]);
+ update1[i] = factor1 * update1[i] + factor2 * update2i;
+ dst[i] -= update1[i];
+ }
+ }
+
+ const Number *src;
+ const Number *matrix_diagonal_inverse;
+ const bool start_zero;
+ const Number factor1;
+ const Number factor2;
+ mutable Number *update1;
+ mutable Number *update2;
+ mutable Number *dst;
+ };
+
+ // selection for deal.II vector
+ template <typename Number>
+ inline
+ void
+ vector_updates (const ::dealii::Vector<Number> &src,
+ const ::dealii::Vector<Number> &matrix_diagonal_inverse,
+ const bool start_zero,
+ const double factor1,
+ const double factor2,
+ ::dealii::Vector<Number> &update1,
+ ::dealii::Vector<Number> &update2,
+ ::dealii::Vector<Number> &dst)
+ {
+ VectorUpdatesRange<Number>(src.size(), src.begin(),
+ matrix_diagonal_inverse.begin(),
+ start_zero, factor1, factor2,
+ update1.begin(), update2.begin(), dst.begin());
+ }
+
+ // selection for parallel deal.II vector
+ template <typename Number>
+ inline
+ void
+ vector_updates (const parallel::distributed::Vector<Number> &src,
+ const parallel::distributed::Vector<Number> &matrix_diagonal_inverse,
+ const bool start_zero,
+ const double factor1,
+ const double factor2,
+ parallel::distributed::Vector<Number> &update1,
+ parallel::distributed::Vector<Number> &update2,
+ parallel::distributed::Vector<Number> &dst)
+ {
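+ // only run through the locally owned range of the distributed vector;
+ // ghost entries are not touched by these updates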
+ VectorUpdatesRange<Number>(src.local_size(), src.begin(),
+ matrix_diagonal_inverse.begin(),
+ start_zero, factor1, factor2,
+ update1.begin(), update2.begin(), dst.begin());
+ }
+ }
+}
+
+
+
template <class MATRIX, class VECTOR>
inline
void
double rhok = delta / theta, sigma = theta / delta;
if (data.nonzero_starting && !dst.all_zero())
{
- matrix_ptr->vmult (update1, dst);
- update1 -= src;
- update1 /= theta;
- update1.scale (data.matrix_diagonal_inverse);
- dst -= update1;
+ matrix_ptr->vmult (update2, dst);
+ internal::PreconditionChebyshev::vector_updates
+ (src, data.matrix_diagonal_inverse, false, 0., 1./theta, update1,
+ update2, dst);
}
else
- {
- dst.equ (1./theta, src);
- dst.scale (data.matrix_diagonal_inverse);
- update1.equ(-1.,dst);
- }
+ internal::PreconditionChebyshev::vector_updates
+ (src, data.matrix_diagonal_inverse, true, 0., 1./theta, update1,
+ update2, dst);
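+ // each sweep of the Chebyshev iteration needs one matrix-vector product;
+ // the remaining vector operations are fused in vector_updates()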
for (unsigned int k=0; k<data.degree; ++k)
{
matrix_ptr->vmult (update2, dst);
- update2 -= src;
- update2.scale (data.matrix_diagonal_inverse);
const double rhokp = 1./(2.*sigma-rhok);
const double factor1 = rhokp * rhok, factor2 = 2.*rhokp/delta;
rhok = rhokp;
- update1.sadd (factor1, factor2, update2);
- dst -= update1;
+ internal::PreconditionChebyshev::vector_updates
+ (src, data.matrix_diagonal_inverse, false, factor1, factor2, update1,
+ update2, dst);
}
}
double rhok = delta / theta, sigma = theta / delta;
if (data.nonzero_starting && !dst.all_zero())
{
- matrix_ptr->Tvmult (update1, dst);
- update1 -= src;
- update1 /= theta;
- update1.scale (data.matrix_diagonal_inverse);
- dst -= update1;
+ matrix_ptr->Tvmult (update2, dst);
+ internal::PreconditionChebyshev::vector_updates
+ (src, data.matrix_diagonal_inverse, false, 0., 1./theta, update1,
+ update2, dst);
}
else
- {
- dst.equ (1./theta, src);
- dst.scale (data.matrix_diagonal_inverse);
- update1.equ(-1.,dst);
- }
+ internal::PreconditionChebyshev::vector_updates
+ (src, data.matrix_diagonal_inverse, true, 0., 1./theta, update1,
+ update2, dst);
for (unsigned int k=0; k<data.degree-1; ++k)
{
matrix_ptr->Tvmult (update2, dst);
- update2 -= src;
- update2.scale (data.matrix_diagonal_inverse);
const double rhokp = 1./(2.*sigma-rhok);
const double factor1 = rhokp * rhok, factor2 = 2.*rhokp/delta;
rhok = rhokp;
- update1.sadd (factor1, factor2, update2);
- dst -= update1;
+ internal::PreconditionChebyshev::vector_updates
+ (src, data.matrix_diagonal_inverse, false, factor1, factor2, update1,
+ update2, dst);
}
}
const unsigned int row,
const unsigned int index);
+ /**
+ * Constructor. Create an accessor into the matrix @p matrix for the given
+ * index within the complete matrix (counting from the zeroth entry).
+ */
+ Accessor (MatrixType *matrix,
+ const std::size_t index_within_matrix);
+
/**
* Constructor. Construct the end accessor for the given matrix.
*/
const unsigned int row,
const unsigned int index);
+ /**
+ * Constructor. Create an accessor into the matrix @p matrix for the given
+ * index within the complete matrix (counting from the zeroth entry).
+ */
+ Accessor (MatrixType *matrix,
+ const std::size_t index);
+
/**
* Constructor. Construct the end accessor for the given matrix.
*/
const unsigned int row,
const unsigned int index);
+ /**
+ * Constructor. Create an iterator into the matrix @p matrix for the given
+ * index in the complete matrix (counting from the zeroth entry).
+ */
+ Iterator (MatrixType *matrix,
+ const std::size_t index_within_matrix);
+
/**
* Constructor. Create the end iterator for the given matrix.
*/
void precondition_SSOR (Vector<somenumber> &dst,
const Vector<somenumber> &src,
const number omega = 1.,
- const std::vector<unsigned int> &pos_right_of_diagonal=std::vector<unsigned int>()) const;
+ const std::vector<std::size_t> &pos_right_of_diagonal=std::vector<std::size_t>()) const;
/**
* Apply SOR preconditioning matrix to <tt>src</tt>.
number SparseMatrix<number>::diag_element (const unsigned int i) const
{
Assert (cols != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(), ExcNotQuadratic());
+ Assert (m() == n(), ExcNotQuadratic());
Assert (i<m(), ExcInvalidIndex1(i));
// Use that the first element in each row of a quadratic matrix is the main
number &SparseMatrix<number>::diag_element (const unsigned int i)
{
Assert (cols != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(), ExcNotQuadratic());
+ Assert (m() == n(), ExcNotQuadratic());
Assert (i<m(), ExcInvalidIndex1(i));
// Use that the first element in each row of a quadratic matrix is the main
+ template <typename number>
+ inline
+ Accessor<number,true>::
+ Accessor (const MatrixType *matrix,
+ const std::size_t index)
+ :
+ SparsityPatternIterators::Accessor (&matrix->get_sparsity_pattern(),
+ index),
+ matrix (matrix)
+ {}
+
+
+
template <typename number>
inline
Accessor<number,true>::
number
Accessor<number, true>::value () const
{
- return matrix->nth_entry_in_row(a_row, a_index);
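+ // the accessor's global index addresses the array of stored values directly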
+ AssertIndexRange(a_index, matrix->n_nonzero_elements());
+ return matrix->val[a_index];
}
inline
Accessor<number, false>::Reference::operator number() const
{
- return accessor->matrix->nth_entry_in_row(accessor->a_row,
- accessor->a_index);
+ AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements());
+ return accessor->matrix->val[accessor->a_index];
}
const typename Accessor<number, false>::Reference &
Accessor<number, false>::Reference::operator = (const number n) const
{
-//TODO: one could optimize this by not going again through the mapping from row/col index to global index
- accessor->matrix->set (accessor->row(), accessor->column(), n);
+ AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements());
+ accessor->matrix->val[accessor->a_index] = n;
return *this;
}
const typename Accessor<number, false>::Reference &
Accessor<number, false>::Reference::operator += (const number n) const
{
-//TODO: one could optimize this by not going again through the mapping from row/col index to global index
- accessor->matrix->set (accessor->row(), accessor->column(),
- static_cast<number>(*this) + n);
+ AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements());
+ accessor->matrix->val[accessor->a_index] += n;
return *this;
}
const typename Accessor<number, false>::Reference &
Accessor<number, false>::Reference::operator -= (const number n) const
{
-//TODO: one could optimize this by not going again through the mapping from row/col index to global index
- accessor->matrix->set (accessor->row(), accessor->column(),
- static_cast<number>(*this) - n);
+ AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements());
+ accessor->matrix->val[accessor->a_index] -= n;
return *this;
}
const typename Accessor<number, false>::Reference &
Accessor<number, false>::Reference::operator *= (const number n) const
{
-//TODO: one could optimize this by not going again through the mapping from row/col index to global index
- accessor->matrix->set (accessor->row(), accessor->column(),
- static_cast<number>(*this)*n);
+ AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements());
+ accessor->matrix->val[accessor->a_index] *= n;
return *this;
}
const typename Accessor<number, false>::Reference &
Accessor<number, false>::Reference::operator /= (const number n) const
{
-//TODO: one could optimize this by not going again through the mapping from row/col index to global index
- accessor->matrix->set (accessor->row(), accessor->column(),
- static_cast<number>(*this)/n);
+ AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements());
+ accessor->matrix->val[accessor->a_index] /= n;
return *this;
}
+ template <typename number>
+ inline
+ Accessor<number,false>::
+ Accessor (MatrixType *matrix,
+ const std::size_t index)
+ :
+ SparsityPatternIterators::Accessor (&matrix->get_sparsity_pattern(),
+ index),
+ matrix (matrix)
+ {}
+
+
+
template <typename number>
inline
Accessor<number,false>::
+ template <typename number, bool Constness>
+ inline
+ Iterator<number, Constness>::
+ Iterator (MatrixType *matrix,
+ const std::size_t index)
+ :
+ accessor(matrix, index)
+ {}
+
+
+
template <typename number, bool Constness>
inline
Iterator<number, Constness>::
- const SparsityPattern &sparsity = accessor.get_matrix().get_sparsity_pattern();
- const unsigned int this_position
- = (*this != (*this)->get_matrix().end()
- ?
- sparsity.get_rowstart_indices()[(*this)->row()] + (*this)->index()
- :
- sparsity.get_rowstart_indices()[sparsity.n_rows()]);
-
- const unsigned int other_position
- = (other != (*this)->get_matrix().end()
- ?
- sparsity.get_rowstart_indices()[other->row()] + other->index()
- :
- sparsity.get_rowstart_indices()[sparsity.n_rows()]);
-
- return (this_position - other_position);
+ return (*this)->a_index - other->a_index;
}
typename SparseMatrix<number>::const_iterator
SparseMatrix<number>::begin () const
{
- // search for the first line with a nonzero number of entries
- for (unsigned int r=0; r<m(); ++r)
- if (cols->row_length(r) > 0)
- return const_iterator(this, r, 0);
-
- // alright, this matrix is completely empty. that's strange but ok. simply
- // return the end() iterator
- return end();
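+ // iterators are now addressed by a global index into the matrix, so the
+ // iterator to the first entry simply starts at index zero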
+ return const_iterator(this, 0);
}
typename SparseMatrix<number>::iterator
SparseMatrix<number>::begin ()
{
- // search for the first line with a nonzero number of entries
- for (unsigned int r=0; r<m(); ++r)
- if (cols->row_length(r) > 0)
- return iterator(this, r, 0);
-
- // alright, this matrix is completely empty. that's strange but ok. simply
- // return the end() iterator
- return end();
+ return iterator (this, 0);
}
typename SparseMatrix<number>::iterator
SparseMatrix<number>::end ()
{
- return iterator(this, m(), 0);
+ return iterator(this, cols->rowstart[cols->rows]);
}
{
Assert (r<m(), ExcIndexRange(r,0,m()));
- if (cols->row_length(r) > 0)
- return const_iterator(this, r, 0);
- else
- return end (r);
+ return const_iterator(this, cols->rowstart[r]);
}
{
Assert (r<m(), ExcIndexRange(r,0,m()));
- // place the iterator on the first entry past this line, or at the end of
- // the matrix
- for (unsigned int i=r+1; i<m(); ++i)
- if (cols->row_length(i) > 0)
- return const_iterator(this, i, 0);
-
- // if there is no such line, then take the end iterator of the matrix
- return end();
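+ // the end of row r is the first entry of row r+1 in the contiguous
+ // storage of the sparsity pattern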
+ return const_iterator(this, cols->rowstart[r+1]);
}
{
Assert (r<m(), ExcIndexRange(r,0,m()));
- if (cols->row_length(r) > 0)
- return iterator(this, r, 0);
- else
- return end (r);
+ return iterator(this, cols->rowstart[r]);
}
{
Assert (r<m(), ExcIndexRange(r,0,m()));
- // place the iterator on the first entry past this line, or at the end of
- // the matrix
- for (unsigned int i=r+1; i<m(); ++i)
- if (cols->row_length(i) > 0)
- return iterator(this, i, 0);
-
- // if there is no such line, then take the end iterator of the matrix
- return end();
+ return iterator(this, cols->rowstart[r+1]);
}
{
// first skip diagonal entry
number *val_ptr = &val[cols->rowstart[row]];
- if (cols->optimize_diagonal())
+ if (m() == n())
++val_ptr;
const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[row]+1];
const number *const val_end_of_row = &val[cols->rowstart[row+1]];
#endif
const unsigned int *this_cols =
- &cols->get_column_numbers()[cols->get_rowstart_indices()[row]];
+ &cols->colnums[cols->rowstart[row]];
const unsigned int row_length_1 = cols->row_length(row)-1;
- number *val_ptr = &val[cols->get_rowstart_indices()[row]];
+ number *val_ptr = &val[cols->rowstart[row]];
- if (cols->optimize_diagonal() == true)
+ if (m() == n())
{
// find diagonal and add it if found
// unsorted case: first, search all the
// indices to find out which values we
// actually need to add.
- const unsigned int *const my_cols = cols->get_column_numbers();
- unsigned int index = cols->get_rowstart_indices()[row];
- const unsigned int next_row_index = cols->get_rowstart_indices()[row+1];
+ const unsigned int *const my_cols = cols->colnums;
+ unsigned int index = cols->rowstart[row];
+ const unsigned int next_row_index = cols->rowstart[row+1];
for (unsigned int j=0; j<n_cols; ++j)
{
// First, search all the indices to find
// out which values we actually need to
// set.
- const unsigned int *my_cols = cols->get_column_numbers();
- std::size_t index = cols->get_rowstart_indices()[row], next_index = index;
- const std::size_t next_row_index = cols->get_rowstart_indices()[row+1];
+ const unsigned int *my_cols = cols->colnums;
+ std::size_t index = cols->rowstart[row], next_index = index;
+ const std::size_t next_row_index = cols->rowstart[row+1];
if (elide_zero_values == true)
{
// clear previous content of C
if (rebuild_sparsity_C == true)
{
- // we are about to change the sparsity
- // pattern of C. this can not work if
- // either A or B use the same sparsity
- // pattern
+ // we are about to change the sparsity pattern of C. this cannot work
+ // if either A or B uses the same sparsity pattern
Assert (&C.get_sparsity_pattern() != &this->get_sparsity_pattern(),
ExcMessage ("Can't use the same sparsity pattern for "
"different matrices if it is to be rebuilt."));
ExcMessage ("Can't use the same sparsity pattern for "
"different matrices if it is to be rebuilt."));
- // need to change the sparsity pattern of
- // C, so cast away const-ness.
+ // need to change the sparsity pattern of C, so cast away const-ness.
SparsityPattern &sp_C =
*(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
C.clear();
sp_C.reinit (0,0,0);
- // create a sparsity pattern for the
- // matrix. we will go through all the
- // rows in the matrix A, and for each
- // column in a row we add the whole row
- // of matrix B with that row number. This
- // means that we will insert a lot of
- // entries to each row, which is best
- // handled by the
+ // create a sparsity pattern for the matrix. we will go through all the
+ // rows in the matrix A, and for each column in a row we add the whole
+ // row of matrix B with that row number. This means that we will insert
+ // a lot of entries to each row, which is best handled by the
// CompressedSimpleSparsityPattern class.
{
CompressedSimpleSparsityPattern csp (m(), B.n());
for (unsigned int i = 0; i < csp.n_rows(); ++i)
{
- const unsigned int *rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+ const unsigned int *rows = &sp_A.colnums[sp_A.rowstart[i]];
const unsigned int *const end_rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ &sp_A.colnums[sp_A.rowstart[i+1]];
for (; rows != end_rows; ++rows)
{
const unsigned int col = *rows;
unsigned int *new_cols = const_cast<unsigned int *>
- (&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[col]]);
+ (&sp_B.colnums[sp_B.rowstart[col]]);
unsigned int *end_new_cols = const_cast<unsigned int *>
- (&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[col+1]]);
+ (&sp_B.colnums[sp_B.rowstart[col+1]]);
- // if B has a diagonal, need to add that
- // manually. this way, we maintain
- // sortedness.
- if (sp_B.optimize_diagonal() == true)
+ // if B has a diagonal, need to add that manually. this way,
+ // we maintain sortedness.
+ if (sp_B.n_rows() == sp_B.n_cols())
{
++new_cols;
csp.add(i, col);
max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
std::vector<numberC> new_entries(max_n_cols_B);
- // now compute the actual entries: a
- // matrix-matrix product involves three
- // nested loops. One over the rows of A,
- // for each row we then loop over all the
- // columns, and then we need to multiply
- // each element with all the elements in
- // that row in B.
+ // now compute the actual entries: a matrix-matrix product involves three
+ // nested loops. One over the rows of A, for each row we then loop over all
+ // the columns, and then we need to multiply each element with all the
+ // elements in that row in B.
for (unsigned int i=0; i<C.m(); ++i)
{
- const unsigned int *rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ const unsigned int *rows = &sp_A.colnums[sp_A.rowstart[i]];
+ const unsigned int *const end_rows = &sp_A.colnums[sp_A.rowstart[i+1]];
for (; rows != end_rows; ++rows)
{
- const double A_val = val[rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]];
+ const double A_val = val[rows-&sp_A.colnums[sp_A.rowstart[0]]];
const unsigned int col = *rows;
const unsigned int *new_cols =
- (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]]);
+ (&sp_B.colnums[sp_B.rowstart[col]]);
// special treatment for diagonal
- if (sp_B.optimize_diagonal())
+ if (sp_B.n_rows() == sp_B.n_cols())
{
C.add (i, *new_cols, A_val *
- B.val[new_cols-&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[0]]] *
+ B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]] *
(use_vector ? V(col) : 1));
++new_cols;
}
- // now the innermost loop that goes over
- // all the elements in row 'col' of
- // matrix B. Cache the elements, and then
- // write them into C at once
+ // now the innermost loop that goes over all the elements in row
+ // 'col' of matrix B. Cache the elements, and then write them into C
+ // at once
numberC *new_ptr = &new_entries[0];
const numberB *B_val_ptr =
- &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
- const numberB *const end_cols =
- &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
- &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+ &B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]];
+ const numberB *const end_cols = &B.val[sp_B.rowstart[col+1]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
*new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
// clear previous content of C
if (rebuild_sparsity_C == true)
{
- // we are about to change the sparsity
- // pattern of C. this can not work if
- // either A or B use the same sparsity
- // pattern
+ // we are about to change the sparsity pattern of C. this cannot work
+ // if either A or B uses the same sparsity pattern
Assert (&C.get_sparsity_pattern() != &this->get_sparsity_pattern(),
ExcMessage ("Can't use the same sparsity pattern for "
"different matrices if it is to be rebuilt."));
ExcMessage ("Can't use the same sparsity pattern for "
"different matrices if it is to be rebuilt."));
- // need to change the sparsity pattern of
- // C, so cast away const-ness.
+ // need to change the sparsity pattern of C, so cast away const-ness.
SparsityPattern &sp_C =
*(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
C.clear();
sp_C.reinit (0,0,0);
- // create a sparsity pattern for the
- // matrix. we will go through all the
- // rows in the matrix A, and for each
- // column in a row we add the whole row
- // of matrix B with that row number. This
- // means that we will insert a lot of
- // entries to each row, which is best
- // handled by the
+ // create a sparsity pattern for the matrix. we will go through all the
+ // rows in the matrix A, and for each column in a row we add the whole
+ // row of matrix B with that row number. This means that we will insert
+ // a lot of entries to each row, which is best handled by the
// CompressedSimpleSparsityPattern class.
{
CompressedSimpleSparsityPattern csp (n(), B.n());
for (unsigned int i = 0; i < sp_A.n_rows(); ++i)
{
const unsigned int *rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+ &sp_A.colnums[sp_A.rowstart[i]];
const unsigned int *const end_rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ &sp_A.colnums[sp_A.rowstart[i+1]];
+ // cast away constness to conform with csp.add_entries interface
unsigned int *new_cols = const_cast<unsigned int *>
- (&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[i]]);
+ (&sp_B.colnums[sp_B.rowstart[i]]);
unsigned int *end_new_cols = const_cast<unsigned int *>
- (&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[i+1]]);
+ (&sp_B.colnums[sp_B.rowstart[i+1]]);
- if (sp_B.optimize_diagonal() == true)
+ if (sp_B.n_rows() == sp_B.n_cols())
++new_cols;
for (; rows != end_rows; ++rows)
{
const unsigned int row = *rows;
- // if B has a diagonal, need to add that
- // manually. this way, we maintain
- // sortedness.
- if (sp_B.optimize_diagonal() == true)
+ // if B has a diagonal, need to add that manually. this way,
+ // we maintain sortedness.
+ if (sp_B.n_rows() == sp_B.n_cols())
csp.add(row, i);
csp.add_entries (row, new_cols, end_new_cols, true);
max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
std::vector<numberC> new_entries(max_n_cols_B);
- // now compute the actual entries: a
- // matrix-matrix product involves three
- // nested loops. One over the rows of A,
- // for each row we then loop over all the
- // columns, and then we need to multiply
- // each element with all the elements in
- // that row in B.
+ // now compute the actual entries: a matrix-matrix product involves three
+ // nested loops. One over the rows of A, for each row we then loop over all
+ // the columns, and then we need to multiply each element with all the
+ // elements in that row in B.
for (unsigned int i=0; i<m(); ++i)
{
- const unsigned int *rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
- const unsigned int *new_cols =
- (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i]]);
- if (sp_B.optimize_diagonal())
+ const unsigned int *rows = &sp_A.colnums[sp_A.rowstart[i]];
+ const unsigned int *const end_rows = &sp_A.colnums[sp_A.rowstart[i+1]];
+ const unsigned int *new_cols = &sp_B.colnums[sp_B.rowstart[i]];
+ if (sp_B.n_rows() == sp_B.n_cols())
++new_cols;
- const numberB *const end_cols =
- &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i+1]]-
- &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+ const numberB *const end_cols = &B.val[sp_B.rowstart[i+1]];
for (; rows != end_rows; ++rows)
{
const unsigned int row = *rows;
- const double A_val = val[rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]];
+ const double A_val = val[rows-&sp_A.colnums[sp_A.rowstart[0]]];
// special treatment for diagonal
- if (sp_B.optimize_diagonal())
+ if (sp_B.n_rows () == sp_B.n_cols())
C.add (row, i, A_val *
- B.val[new_cols-1-&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[0]]] *
+ B.val[new_cols-1-&sp_B.colnums[sp_B.rowstart[0]]] *
(use_vector ? V(i) : 1));
- // now the innermost loop that goes over
- // all the elements in row 'col' of
- // matrix B. Cache the elements, and then
- // write them into C at once
+ // now the innermost loop that goes over all the elements in row
+ // 'col' of matrix B. Cache the elements, and then write them into C
+ // at once
numberC *new_ptr = &new_entries[0];
const numberB *B_val_ptr =
- &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+ &B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
*new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
- Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
- Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
+ AssertDimension (m(), n());
+ AssertDimension (dst.size(), n());
+ AssertDimension (src.size(), n());
const unsigned int n = src.size();
somenumber *dst_ptr = dst.begin();
SparseMatrix<number>::precondition_SSOR (Vector<somenumber> &dst,
const Vector<somenumber> &src,
const number om,
- const std::vector<unsigned int> &pos_right_of_diagonal) const
+ const std::vector<std::size_t> &pos_right_of_diagonal) const
{
// to understand how this function works
// you may want to take a look at the CVS
// which is much clearer...
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
- Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
- Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
+ AssertDimension (m(), n());
+ AssertDimension (dst.size(), n());
+ AssertDimension (src.size(), n());
const unsigned int n = src.size();
- const std::size_t *rowstart_ptr = &cols->rowstart[0];
+ const std::size_t *rowstart_ptr = &cols->rowstart[0];
somenumber *dst_ptr = &dst(0);
// case when we have stored the position
for (unsigned int row=0; row<n; ++row, ++dst_ptr, ++rowstart_ptr)
{
*dst_ptr = src(row);
- const unsigned int first_right_of_diagonal_index =
+ const std::size_t first_right_of_diagonal_index =
pos_right_of_diagonal[row];
Assert (first_right_of_diagonal_index <= *(rowstart_ptr+1),
ExcInternalError());
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
dst = src;
SOR(dst,om);
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
dst = src;
TSOR(dst,om);
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
- Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
+ AssertDimension (m(), n());
+ AssertDimension (dst.size(), n());
for (unsigned int row=0; row<m(); ++row)
{
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
- Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
+ AssertDimension (m(), n());
+ AssertDimension (dst.size(), n());
unsigned int row=m()-1;
while (true)
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
+ AssertDimension (m(), n());
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
Assert (m() == permutation.size(),
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
+ AssertDimension (m(), n());
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
Assert (m() == permutation.size(),
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
+ AssertDimension (m(), n());
Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
+ AssertDimension (m(), n());
Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
+ AssertDimension (m(), n());
Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- Assert (cols->optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
-
+ AssertDimension (m(), n());
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
const unsigned int n = dst.size();
void precondition_SSOR (Vector<somenumber> &dst,
const Vector<somenumber> &src,
const number om = 1.,
- const std::vector<unsigned int> &pos_right_of_diagonal = std::vector<unsigned int>()) const;
+ const std::vector<std::size_t> &pos_right_of_diagonal = std::vector<std::size_t>()) const;
/**
* Apply SOR preconditioning matrix to @p src.
SparseMatrixEZ<number>::precondition_SSOR (Vector<somenumber> &dst,
const Vector<somenumber> &src,
const number om,
- const std::vector<unsigned int> &) const
+ const std::vector<std::size_t> &) const
{
Assert (m() == n(), ExcNotQuadratic());
Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
precondition_SSOR<S2> (Vector<S2> &,
const Vector<S2> &,
const S1,
- const std::vector<unsigned int>&) const;
+ const std::vector<std::size_t>&) const;
template void SparseMatrix<S1>::
precondition_SOR<S2> (Vector<S2> &,
void SparseMatrixEZ<S1>::precondition_SSOR<S2> (Vector<S2> &,
const Vector<S2> &,
const S1,
- const std::vector<unsigned int>&) const;
+ const std::vector<std::size_t>&) const;
template
void SparseMatrixEZ<S1>::precondition_SOR<S2> (Vector<S2> &,
const Vector<S2> &,