From: Martin Kronbichler
Date: Fri, 15 Jan 2010 12:58:40 +0000 (+0000)
Subject: Slight cleanup in SSOR preconditioner.
X-Git-Tag: v8.0.0~6625
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c5bc8a5bdc47c2d4ca34252c3242ebfc64de303d;p=dealii.git

Slight cleanup in SSOR preconditioner.

git-svn-id: https://svn.dealii.org/trunk@20369 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h
index bccdf982ed..da1d17fae7 100644
--- a/deal.II/lac/include/lac/sparse_matrix.templates.h
+++ b/deal.II/lac/include/lac/sparse_matrix.templates.h
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -119,7 +119,7 @@ template <typename number>
 SparseMatrix<number>::~SparseMatrix ()
 {
   cols = 0;
-  
+
   if (val != 0)
     delete[] val;
 }
@@ -131,7 +131,7 @@ SparseMatrix<number> &
 SparseMatrix<number>::operator = (const double d)
 {
   Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
-  
+
   Assert (cols != 0, ExcNotInitialized());
   Assert (cols->compressed || cols->empty(),
           SparsityPattern::ExcNotCompressed());
@@ -245,7 +245,7 @@ SparseMatrix<number>::symmetrize ()
 {
   Assert (cols != 0, ExcNotInitialized());
   Assert (cols->rows == cols->cols, ExcNotQuadratic());
-  
+
   const unsigned int n_rows = m();
   for (unsigned int row=0; row<n_rows; ++row)
@@ ... @@ SparseMatrix<number>::symmetrize ()
       number *val_ptr = &val[cols->rowstart[row]];
       if (cols->optimize_diagonal())
         ++val_ptr;
-      const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[row]+1]; 
+      const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[row]+1];
       const number *const val_end_of_row = &val[cols->rowstart[row+1]];

                                        // treat lower left triangle
@@ -289,7 +289,7 @@ SparseMatrix<number>::copy_from (const SparseMatrix<somenumber> &matrix)
   std::copy (&matrix.val[0], &matrix.val[cols->n_nonzero_elements()],
              &val[0]);
-  
+
   return *this;
 }
@@ -402,18 +402,18 @@ SparseMatrix<number>::add (const unsigned int row,
                                        // just go through the column indices and
                                        // look whether we found one, rather than
                                        // doing many binary searches
-  if (elide_zero_values == false && col_indices_are_sorted == true && 
+  if (elide_zero_values == false && col_indices_are_sorted == true &&
       n_cols > 3)
     {
                                        // check whether the given indices are
                                        // really sorted
#ifdef DEBUG
      for (unsigned int i=1; i<n_cols; ++i)
-       Assert (col_indices[i] > col_indices[i-1], 
+       Assert (col_indices[i] > col_indices[i-1],
               ExcMessage("List of indices not sorted or with duplicates."));
#endif

-      const unsigned int * this_cols = 
+      const unsigned int * this_cols =
       &cols->get_column_numbers()[cols->get_rowstart_indices()[row]];
      number * val_ptr = &val[cols->get_rowstart_indices()[row]];
@@ -422,7 +422,7 @@ SparseMatrix<number>::add (const unsigned int row,
                                        // find diagonal and add it if found
      Assert (this_cols[0] == row, ExcInternalError());
-      const unsigned int * diag_pos = 
+      const unsigned int * diag_pos =
       internals::SparsityPatternTools::optimized_lower_bound (col_indices,
                                                               col_indices+n_cols,
                                                               row);
@@ -471,7 +471,7 @@ SparseMatrix<number>::add (const unsigned int row,
      for (unsigned int i=0; i<n_cols; ++i)
       {
         Assert (col_indices[i] >= this_cols[counter], ExcInternalError());
-         
+
         while (this_cols[counter] < col_indices[i])
           ++counter;
@@ -751,7 +751,7 @@ namespace internal
                       const InVector  &v)
     {
       number norm_sqr=0.;
-      
+
       for (unsigned int i=begin_row; i<end_row; ++i)
[...]
@@ ... @@ SparseMatrix<number>::mmult (SparseMatrix<numberC> &C,
 {
                                        // need to change the sparsity pattern of
                                        // C, so cast away const-ness.
-  SparsityPattern & sp_C = 
+  SparsityPattern & sp_C =
    *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
  C.clear();
  sp_C.reinit (0,0,0);
@@ -895,9 +895,9 @@ SparseMatrix<number>::mmult (SparseMatrix<numberC> &C,
  CompressedSimpleSparsityPattern csp (m(), B.n());
  for (unsigned int i = 0; i < csp.n_rows(); ++i)
    {
-      const unsigned int * rows = 
+      const unsigned int * rows =
       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
-      const unsigned int *const end_rows = 
+      const unsigned int *const end_rows =
       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
      for (; rows != end_rows; ++rows)
       {
@@ -948,24 +948,24 @@ SparseMatrix<number>::mmult (SparseMatrix<numberC> &C,
                                        // that row in B.
  for (unsigned int i=0; i<m(); ++i)
[...]
@@ ... @@ SparseMatrix<number>::mmult (SparseMatrix<numberC> &C,
                                        // matrix B. Cache the elements, and then
                                        // write them into C at once
          numberC * new_ptr = &new_entries[0];
-          const numberB * B_val_ptr = 
+          const numberB * B_val_ptr =
           &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
-          const numberB * const end_cols = 
+          const numberB * const end_cols =
           &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
                  &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
          for (; B_val_ptr != end_cols; ++B_val_ptr)
@@ -1014,7 +1014,7 @@ SparseMatrix<number>::Tmmult (SparseMatrix<numberC> &C,
 {
                                        // need to change the sparsity pattern of
                                        // C, so cast away const-ness.
-  SparsityPattern & sp_C = 
+  SparsityPattern & sp_C =
    *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
  C.clear();
  sp_C.reinit (0,0,0);
@@ -1032,9 +1032,9 @@ SparseMatrix<number>::Tmmult (SparseMatrix<numberC> &C,
  CompressedSimpleSparsityPattern csp (n(), B.n());
  for (unsigned int i = 0; i < sp_A.n_rows(); ++i)
    {
-      const unsigned int * rows = 
+      const unsigned int * rows =
       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
-      const unsigned int *const end_rows = 
+      const unsigned int *const end_rows =
       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
      unsigned int * new_cols = const_cast<unsigned int *>
                                (&sp_B.get_column_numbers()
@@ -1086,30 +1086,30 @@ SparseMatrix<number>::Tmmult (SparseMatrix<numberC> &C,
                                        // that row in B.
  for (unsigned int i=0; i<m(); ++i)
[...]
@@ ... @@ SparseMatrix<number>::Tmmult (SparseMatrix<numberC> &C,
                                        // matrix B. Cache the elements, and then
                                        // write them into C at once
          numberC * new_ptr = &new_entries[0];
-          const numberB * B_val_ptr = 
+          const numberB * B_val_ptr =
           &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
          for (; B_val_ptr != end_cols; ++B_val_ptr)
            *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
@@ -1219,7 +1219,7 @@ namespace internal
                       OutVector &dst)
     {
       number norm_sqr=0.;
-      
+
       for (unsigned int i=begin_row; i<end_row; ++i)
[...]
@@ ... @@ SparseMatrix<number>::precondition_Jacobi (Vector<somenumber> &dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
  Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
@@ -1321,7 +1321,7 @@ SparseMatrix<number>::precondition_SSOR (Vector<somenumber> &dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
  Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
@@ -1341,7 +1341,7 @@ SparseMatrix<number>::precondition_SSOR (Vector<somenumber> &dst,
      for (unsigned int row=0; row<n; ++row, ++dst_ptr, ++rowstart_ptr)
[...]
@@ ... @@ SparseMatrix<number>::precondition_SSOR (Vector<somenumber> &dst,
          *dst_ptr -= s * om;
          *dst_ptr /= val[*rowstart_ptr];
        };
-      
+
      rowstart_ptr = &cols->rowstart[0];
      dst_ptr = &dst(0);
-      for (unsigned int row=0; row<n; ++row, ++dst_ptr, ++rowstart_ptr)
+      for ( ; rowstart_ptr!=&cols->rowstart[n]; ++rowstart_ptr, ++dst_ptr)
       *dst_ptr *= (2.-om)*val[*rowstart_ptr];

                                        // backward sweep
@@ -1370,7 +1370,7 @@ SparseMatrix<number>::precondition_SSOR (Vector<somenumber> &dst,
          number s = 0;
          for (unsigned int j=first_right_of_diagonal_index; j<cols->rowstart[row+1]; ++j)
            s += val[j] * dst(cols->colnums[j]);
-          
+
          *dst_ptr -= s * om;
          *dst_ptr /= val[*rowstart_ptr];
        };
@@ -1406,7 +1406,7 @@ SparseMatrix<number>::precondition_SSOR (Vector<somenumber> &dst,
          *dst_ptr -= s * om;
          *dst_ptr /= val[*rowstart_ptr];
        };
-      
+
      rowstart_ptr = &cols->rowstart[0];
      dst_ptr = &dst(0);
      for (unsigned int row=0; row<n; ++row, ++dst_ptr, ++rowstart_ptr)
[...]
@@ ... @@ SparseMatrix<number>::precondition_SOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  dst = src;
  SOR(dst,om);
@@ -1461,7 +1461,7 @@ SparseMatrix<number>::precondition_TSOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  dst = src;
  TSOR(dst,om);
@@ -1478,7 +1478,7 @@ SparseMatrix<number>::SOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));

  for (unsigned int row=0; row<m(); ++row)
[...]
@@ ... @@ SparseMatrix<number>::SOR (Vector<somenumber>& dst,
           if (col < row)
             s -= val[j] * dst(col);
         }
-      
+
      dst(row) = s * om / val[cols->rowstart[row]];
    }
 }
@@ -1506,12 +1506,11 @@ SparseMatrix<number>::TSOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));

-  for (unsigned int row=m(); row!=0;)
+  for (unsigned int row=m(); row!=0; --row)
    {
-      --row;
      somenumber s = dst(row);
      for (unsigned int j=cols->rowstart[row]; j<cols->rowstart[row+1]; ++j)
       if (cols->colnums[j] > row)
@@ -1534,7 +1533,7 @@ SparseMatrix<number>::PSOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
  Assert (m() == permutation.size(),
         ExcDimensionMismatch(m(), permutation.size()));
@@ -1545,7 +1544,7 @@ SparseMatrix<number>::PSOR (Vector<somenumber>& dst,
    {
      const unsigned int row = permutation[urow];
      somenumber s = dst(row);
-      
+
      for (unsigned int j=cols->rowstart[row]; j<cols->rowstart[row+1]; ++j)
       {
         const unsigned int col = cols->colnums[j];
@@ -1572,7 +1571,7 @@ SparseMatrix<number>::TPSOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
  Assert (m() == permutation.size(),
         ExcDimensionMismatch(m(), permutation.size()));
@@ -1608,7 +1607,7 @@ SparseMatrix<number>::Jacobi_step (Vector<somenumber> &v,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
  Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
@@ -1640,7 +1639,7 @@ SparseMatrix<number>::SOR_step (Vector<somenumber> &v,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
  Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
@@ -1668,7 +1667,7 @@ SparseMatrix<number>::TSOR_step (Vector<somenumber> &v,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
  Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
@@ -1708,13 +1707,13 @@ SparseMatrix<number>::SSOR (Vector<somenumber>& dst,
  Assert (val != 0, ExcNotInitialized());
  Assert (cols->optimize_diagonal(),
         typename SparsityPattern::ExcDiagonalNotOptimized());
-  
+
  Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));

  const unsigned int  n = dst.size();
  unsigned int  j;
  somenumber s;
-  
+
  for (unsigned int i=0; i<n; ++i)
[...]
@@ ... @@ SparseMatrix<number>::SSOR (Vector<somenumber>& dst,
      dst(i) /= val[cols->rowstart[i]];
    }

-  for (int i=n-1; i>=0; i--)  // this time, i is signed, but alsways positive!
+  for (int i=n-1; i>=0; i--)  // this time, i is signed, but always positive!
    {
      s = 0.;
      for (j=cols->rowstart[i]; j<cols->rowstart[i+1] ;j++)
@@ -1769,7 +1768,7 @@ void SparseMatrix<number>::print_formatted (std::ostream &out,
  Assert (val != 0, ExcNotInitialized());

  unsigned int width = width_;
-  
+
  std::ios::fmtflags old_flags = out.flags();
  unsigned int old_precision = out.precision (precision);
@@ -1829,7 +1828,7 @@ void SparseMatrix<number>::print_pattern (std::ostream &out,

 template <typename number>
 void
-SparseMatrix<number>::block_write (std::ostream &out) const 
+SparseMatrix<number>::block_write (std::ostream &out) const
 {
  AssertThrow (out, ExcIO());
@@ -1841,7 +1840,7 @@ SparseMatrix<number>::block_write (std::ostream &out) const
             reinterpret_cast<const char*>(&val[max_len])
             - reinterpret_cast<const char*>(&val[0]));
  out << ']';
-  
+
  AssertThrow (out, ExcIO());
 }
@@ -1868,7 +1867,7 @@ SparseMatrix<number>::block_read (std::istream &in)
                                        // reallocate space
  delete[] val;
  val = new number[max_len];
-  
+
                                        // then read data
  in.read (reinterpret_cast<char*>(&val[0]),
           reinterpret_cast<char*>(&val[max_len])
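
For context, the precondition_SSOR routine touched above performs a forward SOR sweep, scales each component by (2-om) times the matrix diagonal, and then performs a backward sweep. The following dense-matrix sketch mirrors that sweep structure for illustration only; the function name, signature, and dense storage are assumptions made for the example and are not deal.II code or part of this commit.

#include <cstddef>
#include <vector>

// Illustrative sketch only: a dense-matrix analogue of the sweep structure
// used in SparseMatrix::precondition_SSOR above. All names are made up for
// this example.
void ssor_preconditioner_sketch (const std::vector<std::vector<double> > &A,
                                 const std::vector<double>               &src,
                                 std::vector<double>                     &dst,
                                 const double                             om)
{
  const std::size_t n = src.size();
  dst.assign (n, 0.);

                                   // forward sweep: solve (D + om*L) dst = src
  for (std::size_t i=0; i<n; ++i)
    {
      double s = src[i];
      for (std::size_t j=0; j<i; ++j)
        s -= om * A[i][j] * dst[j];
      dst[i] = s / A[i][i];
    }

                                   // scale by (2-om) times the diagonal, as in
                                   // the middle loop of precondition_SSOR
  for (std::size_t i=0; i<n; ++i)
    dst[i] *= (2.-om) * A[i][i];

                                   // backward sweep: solve (D + om*U) dst = dst
  for (std::size_t i=n; i!=0; --i)
    {
      const std::size_t row = i-1;
      double s = dst[row];
      for (std::size_t j=row+1; j<n; ++j)
        s -= om * A[row][j] * dst[j];
      dst[row] = s / A[row][row];
    }
}

With om set to 1 this reduces to a symmetric Gauss-Seidel preconditioning step.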