From: kronbichler Date: Wed, 6 Feb 2013 10:44:40 +0000 (+0000) Subject: Improve performance in the typical use case of sparse matrix iterator by basing the... X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=64620c215608bfc452a2fe0a5663762a787bdeb5;p=dealii-svn.git Improve performance in the typical use case of sparse matrix iterator by basing the iterator on the total index. git-svn-id: https://svn.dealii.org/trunk@28246 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/include/deal.II/lac/precondition.h b/deal.II/include/deal.II/lac/precondition.h index 497a0c153c..a56e555443 100644 --- a/deal.II/include/deal.II/lac/precondition.h +++ b/deal.II/include/deal.II/lac/precondition.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,13 @@ DEAL_II_NAMESPACE_OPEN template class Vector; template class SparseMatrix; +namespace parallel +{ + namespace distributed + { + template class Vector; + } +} /*! @addtogroup Preconditioners *@{ @@ -619,7 +627,7 @@ private: * row where the first position after * the diagonal is located. */ - std::vector pos_right_of_diagonal; + std::vector pos_right_of_diagonal; }; @@ -1311,36 +1319,28 @@ PreconditionSSOR::initialize (const MATRIX &rA, { this->PreconditionRelaxation::initialize (rA, parameters); - // in case we have a SparseMatrix class, - // we can extract information about the - // diagonal. + // in case we have a SparseMatrix class, we can extract information about + // the diagonal. const SparseMatrix *mat = dynamic_cast *>(&*this->A); - // calculate the positions first after - // the diagonal. + // calculate the positions first after the diagonal. if (mat != 0) { - const std::size_t *rowstart_ptr = - mat->get_sparsity_pattern().get_rowstart_indices(); - const unsigned int *const colnums = - mat->get_sparsity_pattern().get_column_numbers(); const unsigned int n = this->A->n(); - pos_right_of_diagonal.resize(n); - for (unsigned int row=0; row(-1)); + for (unsigned int row=0; row::const_iterator + it = mat->begin(row)+1; + for ( ; it < mat->end(row); ++it) + if (it->column() > row) + break; + pos_right_of_diagonal[row] = it - mat->begin(); } } } @@ -1648,7 +1648,8 @@ PreconditionChebyshev::initialize (const MATRIX &matrix, // need at least two iterations to have // maximum and minimum eigenvalue - if (it > data.eig_cg_n_iterations || (it > 2 && + if (res == 0. || + it > data.eig_cg_n_iterations || (it > 2 && res < data.eig_cg_residual)) break; @@ -1662,16 +1663,24 @@ PreconditionChebyshev::initialize (const MATRIX &matrix, offdiagonal.push_back(std::sqrt(beta)/alpha); } - TridiagonalMatrix T(diagonal.size(), true); - for (unsigned int i=0; i T(diagonal.size(), true); + for (unsigned int i=0; i 1) + max_eigenvalue = T.eigenvalue(T.n()-1); + else + max_eigenvalue = min_eigenvalue; } - T.compute_eigenvalues(); - min_eigenvalue = T.eigenvalue(0); - max_eigenvalue = T.eigenvalue(T.n()-1); } // include a safety factor since the CG @@ -1687,6 +1696,161 @@ PreconditionChebyshev::initialize (const MATRIX &matrix, +namespace internal +{ + namespace PreconditionChebyshev + { + // for deal.II vectors, perform updates for Chebyshev preconditioner all + // at once to reduce memory transfer. 
Here, we select between general + // vectors and deal.II vectors where we expand the loop over the (local) + // size of the vector + + // generic part for non-deal.II vectors + template + inline + void + vector_updates (const VECTOR &src, + const VECTOR &matrix_diagonal_inverse, + const bool start_zero, + const double factor1, + const double factor2, + VECTOR &update1, + VECTOR &update2, + VECTOR &dst) + { + if (start_zero) + { + dst.equ (factor2, src); + dst.scale (matrix_diagonal_inverse); + update1.equ(-1.,dst); + } + else + { + update2 -= src; + update2.scale (matrix_diagonal_inverse); + if (factor1 == 0.) + update1.equ(factor2, update2); + else + update1.sadd(factor1, factor2, update2); + dst -= update1; + } + } + + // worker loop for deal.II vectors + template + struct VectorUpdatesRange : public parallel::ParallelForInteger + { + VectorUpdatesRange (const size_t size, + const Number *src, + const Number *matrix_diagonal_inverse, + const bool start_zero, + const Number factor1, + const Number factor2, + Number *update1, + Number *update2, + Number *dst) + : + src (src), + matrix_diagonal_inverse (matrix_diagonal_inverse), + start_zero (start_zero), + factor1 (factor1), + factor2 (factor2), + update1 (update1), + update2 (update2), + dst (dst) + { + if (size < internal::Vector::minimum_parallel_grain_size) + apply_to_subrange (0, size); + else + apply_parallel (0, size, + internal::Vector::minimum_parallel_grain_size); + } + + ~VectorUpdatesRange() + {} + + virtual void + apply_to_subrange (const size_t begin, + const size_t end) const + { + if (factor1 == Number()) + { + if (start_zero) + for (unsigned int i=begin; i + inline + void + vector_updates (const ::dealii::Vector &src, + const ::dealii::Vector &matrix_diagonal_inverse, + const bool start_zero, + const double factor1, + const double factor2, + ::dealii::Vector &update1, + ::dealii::Vector &update2, + ::dealii::Vector &dst) + { + VectorUpdatesRange(src.size(), src.begin(), + matrix_diagonal_inverse.begin(), + start_zero, factor1, factor2, + update1.begin(), update2.begin(), dst.begin()); + } + + // selection for parallel deal.II vector + template + inline + void + vector_updates (const parallel::distributed::Vector &src, + const parallel::distributed::Vector &matrix_diagonal_inverse, + const bool start_zero, + const double factor1, + const double factor2, + parallel::distributed::Vector &update1, + parallel::distributed::Vector &update2, + parallel::distributed::Vector &dst) + { + VectorUpdatesRange(src.local_size(), src.begin(), + matrix_diagonal_inverse.begin(), + start_zero, factor1, factor2, + update1.begin(), update2.begin(), dst.begin()); + } + } +} + + + template inline void @@ -1697,29 +1861,25 @@ PreconditionChebyshev::vmult (VECTOR &dst, double rhok = delta / theta, sigma = theta / delta; if (data.nonzero_starting && !dst.all_zero()) { - matrix_ptr->vmult (update1, dst); - update1 -= src; - update1 /= theta; - update1.scale (data.matrix_diagonal_inverse); - dst -= update1; + matrix_ptr->vmult (update2, dst); + internal::PreconditionChebyshev::vector_updates + (src, data.matrix_diagonal_inverse, false, 0., 1./theta, update1, + update2, dst); } else - { - dst.equ (1./theta, src); - dst.scale (data.matrix_diagonal_inverse); - update1.equ(-1.,dst); - } + internal::PreconditionChebyshev::vector_updates + (src, data.matrix_diagonal_inverse, true, 0., 1./theta, update1, + update2, dst); for (unsigned int k=0; kvmult (update2, dst); - update2 -= src; - update2.scale (data.matrix_diagonal_inverse); const double rhokp = 
1./(2.*sigma-rhok); const double factor1 = rhokp * rhok, factor2 = 2.*rhokp/delta; rhok = rhokp; - update1.sadd (factor1, factor2, update2); - dst -= update1; + internal::PreconditionChebyshev::vector_updates + (src, data.matrix_diagonal_inverse, false, factor1, factor2, update1, + update2, dst); } } @@ -1735,29 +1895,25 @@ PreconditionChebyshev::Tvmult (VECTOR &dst, double rhok = delta / theta, sigma = theta / delta; if (data.nonzero_starting && !dst.all_zero()) { - matrix_ptr->Tvmult (update1, dst); - update1 -= src; - update1 /= theta; - update1.scale (data.matrix_diagonal_inverse); - dst -= update1; + matrix_ptr->Tvmult (update2, dst); + internal::PreconditionChebyshev::vector_updates + (src, data.matrix_diagonal_inverse, false, 0., 1./theta, update1, + update2, dst); } else - { - dst.equ (1./theta, src); - dst.scale (data.matrix_diagonal_inverse); - update1.equ(-1.,dst); - } + internal::PreconditionChebyshev::vector_updates + (src, data.matrix_diagonal_inverse, true, 0., 1./theta, update1, + update2, dst); for (unsigned int k=0; kTvmult (update2, dst); - update2 -= src; - update2.scale (data.matrix_diagonal_inverse); const double rhokp = 1./(2.*sigma-rhok); const double factor1 = rhokp * rhok, factor2 = 2.*rhokp/delta; rhok = rhokp; - update1.sadd (factor1, factor2, update2); - dst -= update1; + internal::PreconditionChebyshev::vector_updates + (src, data.matrix_diagonal_inverse, false, factor1, factor2, update1, + update2, dst); } } diff --git a/deal.II/include/deal.II/lac/sparse_matrix.h b/deal.II/include/deal.II/lac/sparse_matrix.h index 0a2d7e08ed..e061abc4f7 100644 --- a/deal.II/include/deal.II/lac/sparse_matrix.h +++ b/deal.II/include/deal.II/lac/sparse_matrix.h @@ -98,6 +98,12 @@ namespace SparseMatrixIterators const unsigned int row, const unsigned int index); + /** + * Constructor. + */ + Accessor (MatrixType *matrix, + const std::size_t index_with_matrix); + /** * Constructor. Construct the end accessor for the given matrix. */ @@ -234,6 +240,12 @@ namespace SparseMatrixIterators const unsigned int row, const unsigned int index); + /** + * Constructor. + */ + Accessor (MatrixType *matrix, + const std::size_t index); + /** * Constructor. Construct the end accessor for the given matrix. */ @@ -318,6 +330,13 @@ namespace SparseMatrixIterators const unsigned int row, const unsigned int index); + /** + * Constructor. Create an iterator into the matrix @p matrix for the given + * index in the complete matrix (counting from the zeroth entry). + */ + Iterator (MatrixType *matrix, + const std::size_t index_within_matrix); + /** * Constructor. Create the end iterator for the given matrix. */ @@ -1198,7 +1217,7 @@ public: void precondition_SSOR (Vector &dst, const Vector &src, const number omega = 1., - const std::vector &pos_right_of_diagonal=std::vector()) const; + const std::vector &pos_right_of_diagonal=std::vector()) const; /** * Apply SOR preconditioning matrix to src. 
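The two constructors added just above, taking a single std::size_t index into the whole matrix, are the core of this change: instead of carrying a row number plus an index within that row, the accessor now carries one global position into the CSR storage, so begin(), end(), operator++ and iterator subtraction reduce to plain index arithmetic. The following self-contained sketch (plain C++ with made-up names CSRMatrix and CSRIterator, not the deal.II classes; deal.II's accessor derives from SparsityPatternIterators::Accessor and may recover the row differently) illustrates the idea:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Compressed row storage: rowstart has n_rows+1 entries, and the entries of
// row r occupy positions [rowstart[r], rowstart[r+1]) of colnums/val.
struct CSRMatrix
{
  std::vector<std::size_t>  rowstart;
  std::vector<unsigned int> colnums;
  std::vector<double>       val;
};

// An iterator that stores nothing but the matrix pointer and one global
// position into val[]. Advancing and comparing are single-index operations;
// the row is only recovered on demand by a binary search in rowstart.
struct CSRIterator
{
  const CSRMatrix *matrix;
  std::size_t      index;

  unsigned int row () const
  {
    return std::upper_bound (matrix->rowstart.begin(),
                             matrix->rowstart.end(), index)
           - matrix->rowstart.begin() - 1;
  }
  unsigned int column () const { return matrix->colnums[index]; }
  double       value  () const { return matrix->val[index]; }

  CSRIterator &operator++ () { ++index; return *this; }
  bool operator!= (const CSRIterator &other) const { return index != other.index; }
  std::ptrdiff_t operator- (const CSRIterator &other) const
  { return static_cast<std::ptrdiff_t>(index) - static_cast<std::ptrdiff_t>(other.index); }
};

int main ()
{
  // the 2x2 matrix [[1 2],[0 3]] in CSR form
  CSRMatrix m;
  m.rowstart = {0, 2, 3};
  m.colnums  = {0, 1, 1};
  m.val      = {1., 2., 3.};

  CSRIterator it  = {&m, 0};                  // begin(): global index 0
  CSRIterator end = {&m, m.rowstart.back()};  // end(): one past the last entry
  for (; it != end; ++it)
    std::cout << it.row() << ' ' << it.column() << ' ' << it.value() << '\n';
}

With this layout the effects seen further down in the patch follow directly: begin() no longer has to search for the first non-empty row, end(r) is simply the position cols->rowstart[r+1], and the distance between two iterators is a difference of two indices.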
@@ -1850,7 +1869,7 @@ inline number SparseMatrix::diag_element (const unsigned int i) const { Assert (cols != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), ExcNotQuadratic()); + Assert (m() == n(), ExcNotQuadratic()); Assert (i::diag_element (const unsigned int i) { Assert (cols != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), ExcNotQuadratic()); + Assert (m() == n(), ExcNotQuadratic()); Assert (i + inline + Accessor:: + Accessor (const MatrixType *matrix, + const std::size_t index) + : + SparsityPatternIterators::Accessor (&matrix->get_sparsity_pattern(), + index), + matrix (matrix) + {} + + + template inline Accessor:: @@ -1999,7 +2031,8 @@ namespace SparseMatrixIterators number Accessor::value () const { - return matrix->nth_entry_in_row(a_row, a_index); + AssertIndexRange(a_index, matrix->n_nonzero_elements()); + return matrix->val[a_index]; } @@ -2028,8 +2061,8 @@ namespace SparseMatrixIterators inline Accessor::Reference::operator number() const { - return accessor->matrix->nth_entry_in_row(accessor->a_row, - accessor->a_index); + AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements()); + return accessor->matrix->val[accessor->a_index]; } @@ -2039,8 +2072,8 @@ namespace SparseMatrixIterators const typename Accessor::Reference & Accessor::Reference::operator = (const number n) const { -//TODO: one could optimize this by not going again through the mapping from row/col index to global index - accessor->matrix->set (accessor->row(), accessor->column(), n); + AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements()); + accessor->matrix->val[accessor->a_index] = n; return *this; } @@ -2051,9 +2084,8 @@ namespace SparseMatrixIterators const typename Accessor::Reference & Accessor::Reference::operator += (const number n) const { -//TODO: one could optimize this by not going again through the mapping from row/col index to global index - accessor->matrix->set (accessor->row(), accessor->column(), - static_cast(*this) + n); + AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements()); + accessor->matrix->val[accessor->a_index] += n; return *this; } @@ -2064,9 +2096,8 @@ namespace SparseMatrixIterators const typename Accessor::Reference & Accessor::Reference::operator -= (const number n) const { -//TODO: one could optimize this by not going again through the mapping from row/col index to global index - accessor->matrix->set (accessor->row(), accessor->column(), - static_cast(*this) - n); + AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements()); + accessor->matrix->val[accessor->a_index] -= n; return *this; } @@ -2077,9 +2108,8 @@ namespace SparseMatrixIterators const typename Accessor::Reference & Accessor::Reference::operator *= (const number n) const { -//TODO: one could optimize this by not going again through the mapping from row/col index to global index - accessor->matrix->set (accessor->row(), accessor->column(), - static_cast(*this)*n); + AssertIndexRange(accessor->a_index, accessor->matrix->n_nonzero_elements()); + accessor->matrix->val[accessor->a_index] *= n; return *this; } @@ -2090,9 +2120,8 @@ namespace SparseMatrixIterators const typename Accessor::Reference & Accessor::Reference::operator /= (const number n) const { -//TODO: one could optimize this by not going again through the mapping from row/col index to global index - accessor->matrix->set (accessor->row(), accessor->column(), - static_cast(*this)/n); + AssertIndexRange(accessor->a_index, 
accessor->matrix->n_nonzero_elements()); + accessor->matrix->val[accessor->a_index] /= n; return *this; } @@ -2112,6 +2141,19 @@ namespace SparseMatrixIterators + template + inline + Accessor:: + Accessor (MatrixType *matrix, + const std::size_t index) + : + SparsityPatternIterators::Accessor (&matrix->get_sparsity_pattern(), + index), + matrix (matrix) + {} + + + template inline Accessor:: @@ -2156,6 +2198,17 @@ namespace SparseMatrixIterators + template + inline + Iterator:: + Iterator (MatrixType *matrix, + const std::size_t index) + : + accessor(matrix, index) + {} + + + template inline Iterator:: @@ -2269,21 +2322,7 @@ namespace SparseMatrixIterators const SparsityPattern &sparsity = accessor.get_matrix().get_sparsity_pattern(); - const unsigned int this_position - = (*this != (*this)->get_matrix().end() - ? - sparsity.get_rowstart_indices()[(*this)->row()] + (*this)->index() - : - sparsity.get_rowstart_indices()[sparsity.n_rows()]); - - const unsigned int other_position - = (other != (*this)->get_matrix().end() - ? - sparsity.get_rowstart_indices()[other->row()] + other->index() - : - sparsity.get_rowstart_indices()[sparsity.n_rows()]); - - return (this_position - other_position); + return (*this)->a_index - other->a_index; } @@ -2310,14 +2349,7 @@ inline typename SparseMatrix::const_iterator SparseMatrix::begin () const { - // search for the first line with a nonzero number of entries - for (unsigned int r=0; rrow_length(r) > 0) - return const_iterator(this, r, 0); - - // alright, this matrix is completely empty. that's strange but ok. simply - // return the end() iterator - return end(); + return const_iterator(this, 0); } @@ -2335,14 +2367,7 @@ inline typename SparseMatrix::iterator SparseMatrix::begin () { - // search for the first line with a nonzero number of entries - for (unsigned int r=0; rrow_length(r) > 0) - return iterator(this, r, 0); - - // alright, this matrix is completely empty. that's strange but ok. 
simply - // return the end() iterator - return end(); + return iterator (this, 0); } @@ -2351,7 +2376,7 @@ inline typename SparseMatrix::iterator SparseMatrix::end () { - return iterator(this, m(), 0); + return iterator(this, cols->rowstart[cols->rows]); } @@ -2362,10 +2387,7 @@ SparseMatrix::begin (const unsigned int r) const { Assert (rrow_length(r) > 0) - return const_iterator(this, r, 0); - else - return end (r); + return const_iterator(this, cols->rowstart[r]); } @@ -2377,14 +2399,7 @@ SparseMatrix::end (const unsigned int r) const { Assert (rrow_length(i) > 0) - return const_iterator(this, i, 0); - - // if there is no such line, then take the end iterator of the matrix - return end(); + return const_iterator(this, cols->rowstart[r+1]); } @@ -2396,10 +2411,7 @@ SparseMatrix::begin (const unsigned int r) { Assert (rrow_length(r) > 0) - return iterator(this, r, 0); - else - return end (r); + return iterator(this, cols->rowstart[r]); } @@ -2411,14 +2423,7 @@ SparseMatrix::end (const unsigned int r) { Assert (rrow_length(i) > 0) - return iterator(this, i, 0); - - // if there is no such line, then take the end iterator of the matrix - return end(); + return iterator(this, cols->rowstart[r+1]); } diff --git a/deal.II/include/deal.II/lac/sparse_matrix.templates.h b/deal.II/include/deal.II/lac/sparse_matrix.templates.h index 8b929f9e45..30f7c18a67 100644 --- a/deal.II/include/deal.II/lac/sparse_matrix.templates.h +++ b/deal.II/include/deal.II/lac/sparse_matrix.templates.h @@ -289,7 +289,7 @@ SparseMatrix::symmetrize () { // first skip diagonal entry number *val_ptr = &val[cols->rowstart[row]]; - if (cols->optimize_diagonal()) + if (m() == n()) ++val_ptr; const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[row]+1]; const number *const val_end_of_row = &val[cols->rowstart[row+1]]; @@ -450,11 +450,11 @@ SparseMatrix::add (const unsigned int row, #endif const unsigned int *this_cols = - &cols->get_column_numbers()[cols->get_rowstart_indices()[row]]; + &cols->colnums[cols->rowstart[row]]; const unsigned int row_length_1 = cols->row_length(row)-1; - number *val_ptr = &val[cols->get_rowstart_indices()[row]]; + number *val_ptr = &val[cols->rowstart[row]]; - if (cols->optimize_diagonal() == true) + if (m() == n()) { // find diagonal and add it if found @@ -521,9 +521,9 @@ SparseMatrix::add (const unsigned int row, // unsorted case: first, search all the // indices to find out which values we // actually need to add. - const unsigned int *const my_cols = cols->get_column_numbers(); - unsigned int index = cols->get_rowstart_indices()[row]; - const unsigned int next_row_index = cols->get_rowstart_indices()[row+1]; + const unsigned int *const my_cols = cols->colnums; + unsigned int index = cols->rowstart[row]; + const unsigned int next_row_index = cols->rowstart[row+1]; for (unsigned int j=0; j::set (const unsigned int row, // First, search all the indices to find // out which values we actually need to // set. 
- const unsigned int *my_cols = cols->get_column_numbers(); - std::size_t index = cols->get_rowstart_indices()[row], next_index = index; - const std::size_t next_row_index = cols->get_rowstart_indices()[row+1]; + const unsigned int *my_cols = cols->colnums; + std::size_t index = cols->rowstart[row], next_index = index; + const std::size_t next_row_index = cols->rowstart[row+1]; if (elide_zero_values == true) { @@ -902,10 +902,8 @@ SparseMatrix::mmult (SparseMatrix &C, // clear previous content of C if (rebuild_sparsity_C == true) { - // we are about to change the sparsity - // pattern of C. this can not work if - // either A or B use the same sparsity - // pattern + // we are about to change the sparsity pattern of C. this can not work + // if either A or B use the same sparsity pattern Assert (&C.get_sparsity_pattern() != &this->get_sparsity_pattern(), ExcMessage ("Can't use the same sparsity pattern for " "different matrices if it is to be rebuilt.")); @@ -913,44 +911,35 @@ SparseMatrix::mmult (SparseMatrix &C, ExcMessage ("Can't use the same sparsity pattern for " "different matrices if it is to be rebuilt.")); - // need to change the sparsity pattern of - // C, so cast away const-ness. + // need to change the sparsity pattern of C, so cast away const-ness. SparsityPattern &sp_C = *(const_cast(&C.get_sparsity_pattern())); C.clear(); sp_C.reinit (0,0,0); - // create a sparsity pattern for the - // matrix. we will go through all the - // rows in the matrix A, and for each - // column in a row we add the whole row - // of matrix B with that row number. This - // means that we will insert a lot of - // entries to each row, which is best - // handled by the + // create a sparsity pattern for the matrix. we will go through all the + // rows in the matrix A, and for each column in a row we add the whole + // row of matrix B with that row number. This means that we will insert + // a lot of entries to each row, which is best handled by the // CompressedSimpleSparsityPattern class. { CompressedSimpleSparsityPattern csp (m(), B.n()); for (unsigned int i = 0; i < csp.n_rows(); ++i) { - const unsigned int *rows = - &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]]; + const unsigned int *rows = &sp_A.colnums[sp_A.rowstart[i]]; const unsigned int *const end_rows = - &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]]; + &sp_A.colnums[sp_A.rowstart[i+1]]; for (; rows != end_rows; ++rows) { const unsigned int col = *rows; unsigned int *new_cols = const_cast - (&sp_B.get_column_numbers() - [sp_B.get_rowstart_indices()[col]]); + (&sp_B.colnums[sp_B.rowstart[col]]); unsigned int *end_new_cols = const_cast - (&sp_B.get_column_numbers() - [sp_B.get_rowstart_indices()[col+1]]); + (&sp_B.colnums[sp_B.rowstart[col+1]]); - // if B has a diagonal, need to add that - // manually. this way, we maintain - // sortedness. - if (sp_B.optimize_diagonal() == true) + // if B has a diagonal, need to add that manually. this way, + // we maintain sortedness. + if (sp_B.n_rows() == sp_B.n_cols()) { ++new_cols; csp.add(i, col); @@ -977,46 +966,37 @@ SparseMatrix::mmult (SparseMatrix &C, max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i)); std::vector new_entries(max_n_cols_B); - // now compute the actual entries: a - // matrix-matrix product involves three - // nested loops. One over the rows of A, - // for each row we then loop over all the - // columns, and then we need to multiply - // each element with all the elements in - // that row in B. 
+ // now compute the actual entries: a matrix-matrix product involves three + // nested loops. One over the rows of A, for each row we then loop over all + // the columns, and then we need to multiply each element with all the + // elements in that row in B. for (unsigned int i=0; i::Tmmult (SparseMatrix &C, // clear previous content of C if (rebuild_sparsity_C == true) { - // we are about to change the sparsity - // pattern of C. this can not work if - // either A or B use the same sparsity - // pattern + // we are about to change the sparsity pattern of C. this can not work + // if either A or B use the same sparsity pattern Assert (&C.get_sparsity_pattern() != &this->get_sparsity_pattern(), ExcMessage ("Can't use the same sparsity pattern for " "different matrices if it is to be rebuilt.")); @@ -1060,48 +1038,41 @@ SparseMatrix::Tmmult (SparseMatrix &C, ExcMessage ("Can't use the same sparsity pattern for " "different matrices if it is to be rebuilt.")); - // need to change the sparsity pattern of - // C, so cast away const-ness. + // need to change the sparsity pattern of C, so cast away const-ness. SparsityPattern &sp_C = *(const_cast(&C.get_sparsity_pattern())); C.clear(); sp_C.reinit (0,0,0); - // create a sparsity pattern for the - // matrix. we will go through all the - // rows in the matrix A, and for each - // column in a row we add the whole row - // of matrix B with that row number. This - // means that we will insert a lot of - // entries to each row, which is best - // handled by the + // create a sparsity pattern for the matrix. we will go through all the + // rows in the matrix A, and for each column in a row we add the whole + // row of matrix B with that row number. This means that we will insert + // a lot of entries to each row, which is best handled by the // CompressedSimpleSparsityPattern class. { CompressedSimpleSparsityPattern csp (n(), B.n()); for (unsigned int i = 0; i < sp_A.n_rows(); ++i) { const unsigned int *rows = - &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]]; + &sp_A.colnums[sp_A.rowstart[i]]; const unsigned int *const end_rows = - &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]]; + &sp_A.colnums[sp_A.rowstart[i+1]]; + // cast away constness to conform with csp.add_entries interface unsigned int *new_cols = const_cast - (&sp_B.get_column_numbers() - [sp_B.get_rowstart_indices()[i]]); + (&sp_B.colnums[sp_B.rowstart[i]]); unsigned int *end_new_cols = const_cast - (&sp_B.get_column_numbers() - [sp_B.get_rowstart_indices()[i+1]]); + (&sp_B.colnums[sp_B.rowstart[i+1]]); - if (sp_B.optimize_diagonal() == true) + if (sp_B.n_rows() == sp_B.n_cols()) ++new_cols; for (; rows != end_rows; ++rows) { const unsigned int row = *rows; - // if B has a diagonal, need to add that - // manually. this way, we maintain - // sortedness. - if (sp_B.optimize_diagonal() == true) + // if B has a diagonal, need to add that manually. this way, + // we maintain sortedness. + if (sp_B.n_rows() == sp_B.n_cols()) csp.add(row, i); csp.add_entries (row, new_cols, end_new_cols, true); @@ -1125,47 +1096,37 @@ SparseMatrix::Tmmult (SparseMatrix &C, max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i)); std::vector new_entries(max_n_cols_B); - // now compute the actual entries: a - // matrix-matrix product involves three - // nested loops. One over the rows of A, - // for each row we then loop over all the - // columns, and then we need to multiply - // each element with all the elements in - // that row in B. 
+ // now compute the actual entries: a matrix-matrix product involves three + // nested loops. One over the rows of A, for each row we then loop over all + // the columns, and then we need to multiply each element with all the + // elements in that row in B. for (unsigned int i=0; i::precondition_Jacobi (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - - Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n())); - Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n())); + AssertDimension (m(), n()); + AssertDimension (dst.size(), n()); + AssertDimension (src.size(), n()); const unsigned int n = src.size(); somenumber *dst_ptr = dst.begin(); @@ -1358,7 +1317,7 @@ void SparseMatrix::precondition_SSOR (Vector &dst, const Vector &src, const number om, - const std::vector &pos_right_of_diagonal) const + const std::vector &pos_right_of_diagonal) const { // to understand how this function works // you may want to take a look at the CVS @@ -1366,14 +1325,12 @@ SparseMatrix::precondition_SSOR (Vector &dst, // which is much clearer... Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - - Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n())); - Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n())); + AssertDimension (m(), n()); + AssertDimension (dst.size(), n()); + AssertDimension (src.size(), n()); const unsigned int n = src.size(); - const std::size_t *rowstart_ptr = &cols->rowstart[0]; + const std::size_t *rowstart_ptr = &cols->rowstart[0]; somenumber *dst_ptr = &dst(0); // case when we have stored the position @@ -1388,7 +1345,7 @@ SparseMatrix::precondition_SSOR (Vector &dst, for (unsigned int row=0; row::precondition_SOR (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - dst = src; SOR(dst,om); @@ -1510,9 +1464,6 @@ SparseMatrix::precondition_TSOR (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - dst = src; TSOR(dst,om); @@ -1527,10 +1478,8 @@ SparseMatrix::SOR (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - - Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size())); + AssertDimension (m(), n()); + AssertDimension (dst.size(), n()); for (unsigned int row=0; row::TSOR (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - - Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size())); + AssertDimension (m(), n()); + AssertDimension (dst.size(), n()); unsigned int row=m()-1; while (true) @@ -1590,8 +1537,7 @@ SparseMatrix::PSOR (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); + AssertDimension (m(), n()); Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size())); Assert (m() == 
permutation.size(), @@ -1629,8 +1575,7 @@ SparseMatrix::TPSOR (Vector &dst, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); + AssertDimension (m(), n()); Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size())); Assert (m() == permutation.size(), @@ -1666,8 +1611,7 @@ SparseMatrix::Jacobi_step (Vector &v, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); + AssertDimension (m(), n()); Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size())); Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size())); @@ -1698,9 +1642,7 @@ SparseMatrix::SOR_step (Vector &v, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - + AssertDimension (m(), n()); Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size())); Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size())); @@ -1727,9 +1669,7 @@ SparseMatrix::TSOR_step (Vector &v, { Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - + AssertDimension (m(), n()); Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size())); Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size())); @@ -1771,9 +1711,7 @@ SparseMatrix::SSOR (Vector &dst, Assert (cols != 0, ExcNotInitialized()); Assert (val != 0, ExcNotInitialized()); - Assert (cols->optimize_diagonal(), - typename SparsityPattern::ExcDiagonalNotOptimized()); - + AssertDimension (m(), n()); Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size())); const unsigned int n = dst.size(); diff --git a/deal.II/include/deal.II/lac/sparse_matrix_ez.h b/deal.II/include/deal.II/lac/sparse_matrix_ez.h index 08718a1c62..0e0b63acb9 100644 --- a/deal.II/include/deal.II/lac/sparse_matrix_ez.h +++ b/deal.II/include/deal.II/lac/sparse_matrix_ez.h @@ -790,7 +790,7 @@ public: void precondition_SSOR (Vector &dst, const Vector &src, const number om = 1., - const std::vector &pos_right_of_diagonal = std::vector()) const; + const std::vector &pos_right_of_diagonal = std::vector()) const; /** * Apply SOR preconditioning matrix to @p src. 
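The assertion and signature changes above go together with the new iterators: the cached positions right of the diagonal handed to precondition_SSOR are now global positions of type std::size_t (the type of the sparsity pattern's rowstart array) and, as shown in precondition.h further up, they are computed with SparseMatrix iterators instead of poking at get_rowstart_indices() and get_column_numbers() directly. A self-contained sketch of that precomputation on raw CSR arrays (hypothetical function name; it assumes the deal.II convention that a quadratic pattern stores the diagonal entry first in each row, which is why the search starts one entry past the beginning of the row):

#include <cstddef>
#include <vector>

// For each row of a quadratic CSR matrix, find the global index of the first
// entry whose column lies strictly to the right of the diagonal.
std::vector<std::size_t>
compute_pos_right_of_diagonal (const std::vector<std::size_t>  &rowstart,
                               const std::vector<unsigned int> &colnums)
{
  const std::size_t n_rows = rowstart.size() - 1;
  std::vector<std::size_t> pos (n_rows);
  for (std::size_t row = 0; row < n_rows; ++row)
    {
      // skip the diagonal, which is stored as the first entry of the row
      std::size_t p = rowstart[row] + 1;
      while (p < rowstart[row + 1] && colnums[p] <= row)
        ++p;
      pos[row] = p;
    }
  return pos;
}

precondition_SSOR can then split every row at pos[row] into its lower- and upper-triangular parts without re-scanning the column numbers on each application of the preconditioner.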
diff --git a/deal.II/include/deal.II/lac/sparse_matrix_ez.templates.h b/deal.II/include/deal.II/lac/sparse_matrix_ez.templates.h index c3cb1ec036..1c4ee5b8f6 100644 --- a/deal.II/include/deal.II/lac/sparse_matrix_ez.templates.h +++ b/deal.II/include/deal.II/lac/sparse_matrix_ez.templates.h @@ -328,7 +328,7 @@ void SparseMatrixEZ::precondition_SSOR (Vector &dst, const Vector &src, const number om, - const std::vector &) const + const std::vector &) const { Assert (m() == n(), ExcNotQuadratic()); Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n())); diff --git a/deal.II/source/lac/sparse_matrix.inst.in b/deal.II/source/lac/sparse_matrix.inst.in index 2ce3a691ab..a958f23424 100644 --- a/deal.II/source/lac/sparse_matrix.inst.in +++ b/deal.II/source/lac/sparse_matrix.inst.in @@ -67,7 +67,7 @@ for (S1, S2 : REAL_SCALARS) precondition_SSOR (Vector &, const Vector &, const S1, - const std::vector&) const; + const std::vector&) const; template void SparseMatrix:: precondition_SOR (Vector &, diff --git a/deal.II/source/lac/sparse_matrix_ez.inst.in b/deal.II/source/lac/sparse_matrix_ez.inst.in index f988b590d2..1747bc210b 100644 --- a/deal.II/source/lac/sparse_matrix_ez.inst.in +++ b/deal.II/source/lac/sparse_matrix_ez.inst.in @@ -37,7 +37,7 @@ for (S1, S2 : REAL_SCALARS) void SparseMatrixEZ::precondition_SSOR (Vector &, const Vector &, const S1, - const std::vector&) const; + const std::vector&) const; template void SparseMatrixEZ::precondition_SOR (Vector &, const Vector &,
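The other half of this commit sits in precondition.h at the top of the patch: the Chebyshev smoother used to perform its vector update as a chain of whole-vector operations (update2 -= src; update2.scale(matrix_diagonal_inverse); update1.sadd(factor1, factor2, update2); dst -= update1), each of which streams the vectors through memory once more. The new internal::PreconditionChebyshev::vector_updates and VectorUpdatesRange code fuses these into a single sweep, run in parallel above the minimum parallel grain size, precisely "to reduce memory transfer". A minimal serial sketch of the fused loop, using plain std::vector<double> instead of the deal.II vector classes and leaving out the start_zero and factor1 == 0 special cases that the patch handles:

#include <cstddef>
#include <vector>

// Fused Chebyshev vector update, serial sketch. On entry update2 holds the
// result of the matrix-vector product with dst; on exit it holds the
// diagonally preconditioned residual, update1 the accumulated correction,
// and dst the improved iterate. Equivalent to
//   update2 -= src; update2.scale(diag_inverse);
//   update1.sadd(factor1, factor2, update2); dst -= update1;
// but with every vector entry touched in one pass.
void fused_chebyshev_update (const std::vector<double> &src,
                             const std::vector<double> &diag_inverse,
                             const double               factor1,
                             const double               factor2,
                             std::vector<double>       &update1,
                             std::vector<double>       &update2,
                             std::vector<double>       &dst)
{
  for (std::size_t i = 0; i < src.size(); ++i)
    {
      update2[i] = diag_inverse[i] * (update2[i] - src[i]);
      update1[i] = factor1 * update1[i] + factor2 * update2[i];
      dst[i]    -= update1[i];
    }
}

The patch provides this kernel once for dealii::Vector and once for parallel::distributed::Vector (hence the new forward declaration of the latter at the top of precondition.h), while generic vector types keep the unfused fallback.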