// $Id$
// Version: $Name$
//
-// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
// to the file deal.II/doc/license.html for the text and
// further information on this license.
SparseMatrix<number>::~SparseMatrix ()
{
cols = 0;
-
+
if (val != 0)
delete[] val;
}
SparseMatrix<number>::operator = (const double d)
{
Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
-
+
Assert (cols != 0, ExcNotInitialized());
Assert (cols->compressed || cols->empty(), SparsityPattern::ExcNotCompressed());
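// Editor's usage sketch (illustrative, assuming a matrix already
// attached to a compressed sparsity pattern): the only scalar that may
// be assigned is zero, which resets all stored entries while keeping
// the sparsity pattern intact.
//
//   SparseMatrix<double> A;
//   A.reinit (sparsity);  // 'sparsity' is a compressed SparsityPattern
//   A = 0;                // fine: zeroes every stored entry
//   A = 1;                // fails ExcScalarAssignmentOnlyForZeroValue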
{
Assert (cols != 0, ExcNotInitialized());
Assert (cols->rows == cols->cols, ExcNotQuadratic());
-
+
const unsigned int n_rows = m();
for (unsigned int row=0; row<n_rows; ++row)
{
number *val_ptr = &val[cols->rowstart[row]];
if (cols->optimize_diagonal())
++val_ptr;
- const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[row]+1];
+ const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[row]+1];
const number *const val_end_of_row = &val[cols->rowstart[row+1]];
// treat lower left triangle
std::copy (&matrix.val[0], &matrix.val[cols->n_nonzero_elements()],
&val[0]);
-
+
return *this;
}
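// Editor's sketch (not deal.II code) of why the flat std::copy above
// suffices: two CSR matrices that share one sparsity pattern store
// their values in index-compatible 'val' arrays, so copying
// n_nonzero_elements() values copies the whole matrix, diagonal and
// triangles alike.
#include <algorithm>

inline void copy_csr_values (const double      *src_val,
                             double            *dst_val,
                             const unsigned int n_nonzero)
{
  // valid only when source and destination use the same
  // (rowstart, colnums) layout
  std::copy (src_val, src_val + n_nonzero, dst_val);
}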
// just go through the column indices and
// look whether we found one, rather than
// doing many binary searches
- if (elide_zero_values == false && col_indices_are_sorted == true &&
+ if (elide_zero_values == false && col_indices_are_sorted == true &&
n_cols > 3)
{
// check whether the given indices are
// really sorted
#ifdef DEBUG
for (unsigned int i=1; i<n_cols; ++i)
- Assert (col_indices[i] > col_indices[i-1],
+ Assert (col_indices[i] > col_indices[i-1],
ExcMessage("List of indices not sorted or with duplicates."));
#endif
- const unsigned int * this_cols =
+ const unsigned int * this_cols =
&cols->get_column_numbers()[cols->get_rowstart_indices()[row]];
number * val_ptr = &val[cols->get_rowstart_indices()[row]];
// find diagonal and add it if found
Assert (this_cols[0] == row, ExcInternalError());
- const unsigned int * diag_pos =
+ const unsigned int * diag_pos =
internals::SparsityPatternTools::optimized_lower_bound (col_indices,
col_indices+n_cols,
row);
for (unsigned int i=0; i<n_cols; ++i)
{
Assert (col_indices[i] >= this_cols[counter], ExcInternalError());
-
+
while (this_cols[counter] < col_indices[i])
++counter;
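// Editor's sketch of the linear merge used above (names illustrative):
// because both the stored column indices of the row and the incoming
// indices are sorted ascending, a single forward-moving cursor finds
// every target slot in O(row length) total, instead of one binary
// search per incoming entry.
inline void add_sorted_row (const unsigned int *row_cols,  // stored, sorted
                            double             *row_vals,  // values, same layout
                            const unsigned int *new_cols,  // incoming, sorted
                            const double       *new_vals,
                            const unsigned int  n_new)
{
  unsigned int counter = 0;
  for (unsigned int i=0; i<n_new; ++i)
    {
      // advance the cursor until it reaches the incoming index; the
      // caller guarantees the entry exists in the sparsity pattern
      while (row_cols[counter] < new_cols[i])
        ++counter;
      row_vals[counter] += new_vals[i];
    }
}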
const InVector &v)
{
number norm_sqr=0.;
-
+
for (unsigned int i=begin_row; i<end_row; ++i)
{
number s = 0;
const InVector &v)
{
number norm_sqr=0.;
-
+
for (unsigned int i=begin_row; i<end_row; ++i)
{
number s = 0;
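// Editor's sketch of the accumulation these fragments perform over
// their row range [begin_row, end_row): each thread sums
// v(i) * (M v)(i), i.e. a slice of v^T M v; the scalar-product variant
// presumably multiplies by a second vector u(i) instead. Raw CSR
// arrays, illustrative names.
inline double partial_norm_sqr (const double       *val,
                                const unsigned int *colnums,
                                const unsigned int *rowstart,
                                const double       *v,
                                const unsigned int  begin_row,
                                const unsigned int  end_row)
{
  double norm_sqr = 0.;
  for (unsigned int i=begin_row; i<end_row; ++i)
    {
      double s = 0.;
      for (unsigned int j=rowstart[i]; j<rowstart[i+1]; ++j)
        s += val[j] * v[colnums[j]];   // s = (M v)(i)
      norm_sqr += v[i] * s;
    }
  return norm_sqr;   // partial sums of all threads are added up later
}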
{
// need to change the sparsity pattern of
// C, so cast away const-ness.
- SparsityPattern & sp_C =
+ SparsityPattern & sp_C =
*(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
C.clear();
sp_C.reinit (0,0,0);
CompressedSimpleSparsityPattern csp (m(), B.n());
for (unsigned int i = 0; i < csp.n_rows(); ++i)
{
- const unsigned int * rows =
+ const unsigned int * rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
+ const unsigned int *const end_rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
for (; rows != end_rows; ++rows)
{
// that row in B.
for (unsigned int i=0; i<C.m(); ++i)
{
- const unsigned int * rows =
+ const unsigned int * rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
+ const unsigned int *const end_rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
for (; rows != end_rows; ++rows)
{
- const double A_val = global_entry
+ const double A_val = global_entry
(rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
const unsigned int col = *rows;
- const unsigned int * new_cols =
+ const unsigned int * new_cols =
(&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]]);
// special treatment for diagonal
if (sp_B.optimize_diagonal())
{
- C.add (i, *new_cols, A_val *
+ C.add (i, *new_cols, A_val *
B.global_entry(new_cols-&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[0]]) *
+ [sp_B.get_rowstart_indices()[0]]) *
(use_vector ? V(col) : 1));
++new_cols;
}
// matrix B. Cache the elements, and then
// write them into C at once
numberC * new_ptr = &new_entries[0];
- const numberB * B_val_ptr =
+ const numberB * B_val_ptr =
&B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
- const numberB * const end_cols =
+ const numberB * const end_cols =
&B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
{
// need to change the sparsity pattern of
// C, so cast away const-ness.
- SparsityPattern & sp_C =
+ SparsityPattern & sp_C =
*(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
C.clear();
sp_C.reinit (0,0,0);
CompressedSimpleSparsityPattern csp (n(), B.n());
for (unsigned int i = 0; i < sp_A.n_rows(); ++i)
{
- const unsigned int * rows =
+ const unsigned int * rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
+ const unsigned int *const end_rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
unsigned int * new_cols = const_cast<unsigned int*>
(&sp_B.get_column_numbers()
// that row in B.
for (unsigned int i=0; i<m(); ++i)
{
- const unsigned int * rows =
+ const unsigned int * rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
+ const unsigned int *const end_rows =
&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
- const unsigned int * new_cols =
+ const unsigned int * new_cols =
(&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i]]);
if (sp_B.optimize_diagonal())
++new_cols;
- const numberB * const end_cols =
+ const numberB * const end_cols =
&B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i+1]]-
&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
for (; rows != end_rows; ++rows)
{
const unsigned int row = *rows;
- const double A_val = global_entry
+ const double A_val = global_entry
(rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
// special treatment for diagonal
if (sp_B.optimize_diagonal())
- C.add (row, i, A_val *
+ C.add (row, i, A_val *
B.global_entry(new_cols-1-&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[0]]) *
+ [sp_B.get_rowstart_indices()[0]]) *
(use_vector ? V(i) : 1));
// now the innermost loop that goes over
// matrix B. Cache the elements, and then
// write them into C at once
numberC * new_ptr = &new_entries[0];
- const numberB * B_val_ptr =
+ const numberB * B_val_ptr =
&B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
*new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
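// Editor's sketch of the row-scatter scheme that both mmult and Tmmult
// use (illustrative names, raw CSR arrays): every stored entry A(i,k)
// contributes A(i,k) * B(k,:) -- optionally weighted by a diagonal
// vector V -- to row i of C. Caching the scaled row of B in a scratch
// array and writing it into C with one add() call keeps C's pattern
// lookups out of the innermost loop.
inline void scatter_row_product (const double        A_ik,
                                 const double       *B_row_vals, // values of B(k,:)
                                 const unsigned int  B_row_len,
                                 const double        V_k,        // diagonal weight, 1 if unused
                                 double             *scratch)    // >= B_row_len entries
{
  for (unsigned int j=0; j<B_row_len; ++j)
    scratch[j] = A_ik * B_row_vals[j] * V_k;
  // the caller then adds 'scratch' into row i of C in one call
}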
OutVector &dst)
{
number norm_sqr=0.;
-
+
for (unsigned int i=begin_row; i<end_row; ++i)
{
number s = b(i);
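// Editor's sketch of the residual accumulation started above: each row
// contributes s = b(i) - (M u)(i) to dst and s*s to the partial norm;
// the caller finally takes the square root of the global sum. Raw CSR
// arrays, illustrative names.
inline double partial_residual_sqr (const double       *val,
                                    const unsigned int *colnums,
                                    const unsigned int *rowstart,
                                    const double       *u,
                                    const double       *b,
                                    double             *dst,
                                    const unsigned int  begin_row,
                                    const unsigned int  end_row)
{
  double norm_sqr = 0.;
  for (unsigned int i=begin_row; i<end_row; ++i)
    {
      double s = b[i];
      for (unsigned int j=rowstart[i]; j<rowstart[i+1]; ++j)
        s -= val[j] * u[colnums[j]];
      dst[i] = s;
      norm_sqr += s*s;
    }
  return norm_sqr;
}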
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (dst.size() == n(), ExcDimensionMismatch (dst.size(), n()));
Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
for (unsigned int row=0; row<n; ++row, ++dst_ptr, ++rowstart_ptr)
{
*dst_ptr = src(row);
- const unsigned int first_right_of_diagonal_index =
+ const unsigned int first_right_of_diagonal_index =
pos_right_of_diagonal[row];
Assert (first_right_of_diagonal_index <= *(rowstart_ptr+1),
ExcInternalError());
*dst_ptr -= s * om;
*dst_ptr /= val[*rowstart_ptr];
};
-
+
rowstart_ptr = &cols->rowstart[0];
dst_ptr = &dst(0);
- for (unsigned int row=0; row<n; ++row, ++rowstart_ptr, ++dst_ptr)
+ for ( ; rowstart_ptr!=&cols->rowstart[n]; ++rowstart_ptr, ++dst_ptr)
*dst_ptr *= (2.-om)*val[*rowstart_ptr];
// backward sweep
number s = 0;
for (unsigned int j=first_right_of_diagonal_index; j<end_row; ++j)
s += val[j] * dst(cols->colnums[j]);
-
+
*dst_ptr -= s * om;
*dst_ptr /= val[*rowstart_ptr];
};
*dst_ptr -= s * om;
*dst_ptr /= val[*rowstart_ptr];
};
-
+
rowstart_ptr = &cols->rowstart[0];
dst_ptr = &dst(0);
for (unsigned int row=0; row<n; ++row, ++rowstart_ptr, ++dst_ptr)
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
dst = src;
SOR(dst,om);
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
dst = src;
TSOR(dst,om);
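// Editor's usage sketch: both wrappers above implement preconditioning
// by a single relaxation sweep -- copy the source vector, then run one
// (T)SOR pass in place. Typical use is as a smoother or preconditioner,
// e.g. (illustrative values):
//
//   Vector<double> dst(A.m()), src(A.m());
//   A.precondition_SOR  (dst, src, 1.2);  // one forward sweep
//   A.precondition_TSOR (dst, src, 1.2);  // one backward sweep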
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
for (unsigned int row=0; row<m(); ++row)
if (col < row)
s -= val[j] * dst(col);
}
-
+
dst(row) = s * om / val[cols->rowstart[row]];
}
}
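// Editor's sketch of the forward sweep above on raw CSR arrays
// (illustrative names): an in-place solve of (D/om + L) dst = rhs,
// where rhs is the incoming content of dst, L the strictly lower
// triangle, and D the diagonal, which is stored first in each row.
inline void sor_sweep (const double       *val,
                       const unsigned int *colnums,
                       const unsigned int *rowstart,
                       double             *dst,
                       const unsigned int  n,
                       const double        om)
{
  for (unsigned int row=0; row<n; ++row)
    {
      double s = dst[row];
      for (unsigned int j=rowstart[row]; j<rowstart[row+1]; ++j)
        if (colnums[j] < row)
          s -= val[j] * dst[colnums[j]];   // uses already-updated entries
      dst[row] = s * om / val[rowstart[row]];
    }
}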
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
- for (unsigned int row=m(); row!=0;)
+ for (unsigned int row=m(); row-- != 0;) // test-then-decrement: row runs m()-1 down to 0
{
- --row;
somenumber s = dst(row);
for (unsigned int j=cols->rowstart[row]; j<cols->rowstart[row+1]; ++j)
if (cols->colnums[j] > row)
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
Assert (m() == permutation.size(),
ExcDimensionMismatch(m(), permutation.size()));
{
const unsigned int row = permutation[urow];
somenumber s = dst(row);
-
+
for (unsigned int j=cols->rowstart[row]; j<cols->rowstart[row+1]; ++j)
{
const unsigned int col = cols->colnums[j];
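// Editor's note on the permuted sweep above: rows are visited in the
// order prescribed by 'permutation', so "already updated" is decided
// by position in that ordering rather than by row number. In deal.II
// this test uses the inverse permutation; a sketch (illustrative):
//
//   if (inverse_permutation[col] < urow)   // col was visited earlier
//     s -= val[j] * dst(col);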
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
Assert (m() == permutation.size(),
ExcDimensionMismatch(m(), permutation.size()));
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == v.size(), ExcDimensionMismatch(m(),v.size()));
Assert (m() == b.size(), ExcDimensionMismatch(m(),b.size()));
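// Editor's sketch of one in-place relaxation step of the kind the
// *_step functions above take (v is the iterate, b the right hand
// side): v(row) += om * (b(row) - (A v)(row)) / A(row,row), where the
// row loop already sees updated entries of v. Raw CSR arrays,
// illustrative names.
inline void sor_step (const double       *val,
                      const unsigned int *colnums,
                      const unsigned int *rowstart,
                      double             *v,
                      const double       *b,
                      const unsigned int  n,
                      const double        om)
{
  for (unsigned int row=0; row<n; ++row)
    {
      double s = b[row];
      for (unsigned int j=rowstart[row]; j<rowstart[row+1]; ++j)
        s -= val[j] * v[colnums[j]];
      v[row] += s * om / val[rowstart[row]];   // diagonal stored first
    }
}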
Assert (val != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(),
typename SparsityPattern::ExcDiagonalNotOptimized());
-
+
Assert (m() == dst.size(), ExcDimensionMismatch(m(),dst.size()));
const unsigned int n = dst.size();
unsigned int j;
somenumber s;
-
+
for (unsigned int i=0; i<n; i++)
{
s = 0.;
dst(i) /= val[cols->rowstart[i]];
}
- for (int i=n-1; i>=0; i--) // this time, i is signed, but alsways positive!
+ for (int i=n-1; i>=0; i--) // this time, i is signed, but never negative!
{
s = 0.;
for (j=cols->rowstart[i]; j<cols->rowstart[i+1] ;j++)
Assert (val != 0, ExcNotInitialized());
unsigned int width = width_;
-
+
std::ios::fmtflags old_flags = out.flags();
unsigned int old_precision = out.precision (precision);
template <typename number>
void
-SparseMatrix<number>::block_write (std::ostream &out) const
+SparseMatrix<number>::block_write (std::ostream &out) const
{
AssertThrow (out, ExcIO());
reinterpret_cast<const char*>(&val[max_len])
- reinterpret_cast<const char*>(&val[0]));
out << ']';
-
+
AssertThrow (out, ExcIO());
}
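// Editor's note on the pointer arithmetic above: casting both array
// bounds to const char* and subtracting yields the size of 'val' in
// bytes, i.e. max_len * sizeof(number). A self-contained illustration:
#include <cstddef>

template <typename number>
inline std::size_t byte_count (const number *begin, const number *end)
{
  return static_cast<std::size_t>(reinterpret_cast<const char*>(end)
                                  - reinterpret_cast<const char*>(begin));
}
// byte_count(&val[0], &val[max_len]) == max_len * sizeof(number)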
// reallocate space
delete[] val;
val = new number[max_len];
-
+
// then read data
in.read (reinterpret_cast<char*>(&val[0]),
reinterpret_cast<char*>(&val[max_len])