bool empty () const;
/**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
+ * Return the dimension of the image space. To remember: the matrix is of
+ * dimension $m \times n$.
*/
- unsigned int m () const;
+ size_type m () const;
/**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
+ * Return the dimension of the domain space. To remember: the matrix is of
+ * dimension $m \times n$.
*/
- unsigned int n () const;
+ size_type n () const;
/**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
+ * Return the number of nonzero elements of this matrix. Actually, it
+ * returns the number of entries in the sparsity pattern; if any of the
+ * entries should happen to be zero, it is counted anyway.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
- * Return the number of actually
- * nonzero elements of this
- * matrix.
+ * Return the number of actually nonzero elements of this matrix.
*
- * Note, that this function does
- * (in contrary to
- * n_nonzero_elements()) not
- * count all entries of the
- * sparsity pattern but only the
- * ones that are nonzero.
+ * Note that, in contrast to n_nonzero_elements(), this function does not
+ * count all entries of the sparsity pattern but only those that are
+ * nonzero.
*/
- unsigned int n_actually_nonzero_elements () const;
+ size_type n_actually_nonzero_elements () const;
/**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
+ * Return a (constant) reference to the underlying sparsity pattern of this
+ * matrix.
*
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
+ * Though the return value is declared <tt>const</tt>, you should be aware
+ * that it may change if you call any nonconstant function of objects which
+ * operate on it.
*/
const ChunkSparsityPattern &get_sparsity_pattern () const;
*/
//@{
/**
- * Set the element (<i>i,j</i>)
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
+ * Set the element (<i>i,j</i>) to <tt>value</tt>. Throws an error if the
+ * entry does not exist or if <tt>value</tt> is not a finite number. Still,
+ * it is allowed to store zero values in non-existent fields.
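+ *
+ * A minimal usage sketch (hedged; <tt>A</tt> is a hypothetical
+ * ChunkSparseMatrix<double> whose sparsity pattern contains entry (0,1)):
+ * @code
+ * A.set (0, 1, 3.141);  // overwrite entry (0,1)
+ * A.set (0, 2, 0.);     // allowed even if (0,2) is not in the pattern
+ * @endcode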
*/
- void set (const unsigned int i,
- const unsigned int j,
+ void set (const size_type i,
+ const size_type j,
const number value);
/**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
+ * Add <tt>value</tt> to the element (<i>i,j</i>). Throws an error if the
+ * entry does not exist or if <tt>value</tt> is not a finite number. Still,
+ * it is allowed to store zero values in non-existent fields.
*/
- void add (const unsigned int i,
- const unsigned int j,
+ void add (const size_type i,
+ const size_type j,
const number value);
/**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
+ * Add an array of values given by <tt>values</tt> to the given global
+ * matrix row at the columns specified by col_indices in the sparse matrix.
*
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
+ * The optional parameter <tt>elide_zero_values</tt> can be used to specify
+ * whether zero values should be added anyway or whether they should be
+ * filtered out so that only non-zero data is added. The default value is
+ * <tt>true</tt>, i.e., zero values won't be added into the matrix.
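+ *
+ * A hedged sketch (hypothetical data; both entries assumed to exist in the
+ * sparsity pattern):
+ * @code
+ * const size_type cols[2] = {1, 4};
+ * const double    vals[2] = {2.0, 0.5};
+ * A.add (3, 2, cols, vals);  // adds to entries (3,1) and (3,4)
+ * @endcode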
*/
template <typename number2>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
+ void add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number2 *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
/**
- * Multiply the entire matrix by a
- * fixed factor.
+ * Multiply the entire matrix by a fixed factor.
*/
ChunkSparseMatrix &operator *= (const number factor);
//@{
/**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the required element does
- * not exist in the matrix.
+ * Return the value of the entry (<i>i,j</i>). This may be an expensive
+ * operation and you should always take care where to call this function.
+ * In order to avoid abuse, this function throws an exception if the
+ * required element does not exist in the matrix.
*
- * In case you want a function
- * that returns zero instead (for
- * entries that are not in the
- * sparsity pattern of the
- * matrix), use the el()
- * function.
+ * In case you want a function that returns zero instead (for entries that
+ * are not in the sparsity pattern of the matrix), use the el() function.
*
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
+ * If you are looping over all elements, consider using one of the iterator
+ * classes instead, since they are tailored better to a sparse matrix
* structure.
*/
- number operator () (const unsigned int i,
- const unsigned int j) const;
+ number operator () (const size_type i,
+ const size_type j) const;
/**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry (<i>i,j</i>). The
- * only difference is that if
- * this entry does not exist in
- * the sparsity pattern, then
- * instead of raising an
- * exception, zero is
- * returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
+ * This function is mostly like operator()() in that it returns the value of
+ * the matrix entry (<i>i,j</i>). The only difference is that if this entry
+ * does not exist in the sparsity pattern, then instead of raising an
+ * exception, zero is returned. While this may be convenient in some cases,
+ * note that it is simple to write algorithms that are slow compared to an
+ * optimal solution, since the sparsity of the matrix is not used.
*
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
+ * If you are looping over all elements, consider using one of the iterator
+ * classes instead, since they are tailored better to a sparse matrix
* structure.
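+ *
+ * To illustrate the difference to operator()() (hedged sketch; indices
+ * assumed in range):
+ * @code
+ * const number a = A(i,j);     // throws if (i,j) is not in the pattern
+ * const number b = A.el(i,j);  // returns zero in that case instead
+ * @endcode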
*/
- number el (const unsigned int i,
- const unsigned int j) const;
+ number el (const size_type i,
+ const size_type j) const;
/**
* Return the main diagonal element in the <i>i</i>th row. This function
* throws an error if the matrix is not
* quadratic.
*
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
+ * This function is considerably faster than operator()(), since for
+ * quadratic matrices, the diagonal entry may be the first to be stored in
+ * each row and access therefore does not involve searching for the right
+ * column number.
*/
- number diag_element (const unsigned int i) const;
+ number diag_element (const size_type i) const;
/**
- * Same as above, but return a
- * writeable reference. You're
- * sure you know what you do?
+ * Same as above, but return a writeable reference. Are you sure you know
+ * what you are doing?
*/
- number &diag_element (const unsigned int i);
+ number &diag_element (const size_type i);
//@}
/**
number *val;
/**
- * Allocated size of #val. This
- * can be larger than the
- * actually used part if the size
- * of the matrix was reduced
- * somewhen in the past by
- * associating a sparsity pattern
- * with a smaller size to this
- * object, using the reinit()
+ * Allocated size of #val. This can be larger than the actually used part
+ * if the size of the matrix was reduced at some point in the past by
+ * associating a sparsity pattern with a smaller size to this object, using the reinit()
* function.
*/
- unsigned int max_len;
+ size_type max_len;
/**
- * Return the location of entry
- * $(i,j)$ within the val array.
+ * Return the location of entry $(i,j)$ within the val array.
*/
- unsigned int compute_location (const unsigned int i,
- const unsigned int j) const;
+ size_type compute_location (const size_type i,
+ const size_type j) const;
- // make all other sparse matrices
- // friends
+ // make all other sparse matrices friends
template <typename somenumber> friend class ChunkSparseMatrix;
+
+ /**
+ * Also give access to internal details to the iterator/accessor
+ * classes.
+ */
+ template <typename,bool> friend class ChunkSparseMatrixIterators::Iterator;
+ template <typename,bool> friend class ChunkSparseMatrixIterators::Accessor;
};
/*@}*/
+ template <typename number>
+ inline
+ const ChunkSparsityPattern &
+ ChunkSparseMatrix<number>::get_sparsity_pattern () const
+ {
+ Assert (cols != 0, ExcNotInitialized());
+ return *cols;
+ }
+
+
+
template <typename number>
inline
-unsigned int
-ChunkSparseMatrix<number>::compute_location (const unsigned int i,
- const unsigned int j) const
+typename ChunkSparseMatrix<number>::size_type
+ChunkSparseMatrix<number>::compute_location (const size_type i,
+ const size_type j) const
{
- const unsigned int chunk_size = cols->get_chunk_size();
- const unsigned int chunk_index
+ const size_type chunk_size = cols->get_chunk_size();
+ const size_type chunk_index
- = cols->sparsity_pattern(i/chunk_size, j/chunk_size);
+ = cols->sparsity_pattern(i/chunk_size, j/chunk_size);
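+  // entry (i,j) lives in chunk (i/chunk_size, j/chunk_size). each chunk
+  // occupies chunk_size*chunk_size consecutive slots of val, so the final
+  // location combines the chunk's index with the (i%chunk_size,
+  // j%chunk_size) offset within that chunk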
if (chunk_index == ChunkSparsityPattern::invalid_entry)
return ChunkSparsityPattern::invalid_entry;
Assert (numbers::is_finite(value), ExcNumberNotFinite());
Assert (cols != 0, ExcNotInitialized());
- // it is allowed to set elements of
- // the matrix that are not part of
- // the sparsity pattern, if the
- // value to which we set it is zero
+ // it is allowed to set elements of the matrix that are not part of the
+ // sparsity pattern, if the value to which we set it is zero
- const unsigned int index = compute_location(i,j);
+ const size_type index = compute_location(i,j);
Assert ((index != SparsityPattern::invalid_entry) ||
(value == 0.),
ExcInvalidIndex(i,j));
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
- // multiply all elements of the matrix with
- // the given factor. this includes the
- // padding elements in chunks that overlap
- // the boundaries of the actual matrix --
- // but since multiplication with a number
- // does not violate the invariant of
- // keeping these elements at zero nothing
- // can happen
+ // multiply all elements of the matrix with the given factor. this includes
+ // the padding elements in chunks that overlap the boundaries of the actual
+ // matrix -- but since multiplication with a number does not violate the
+ // invariant of keeping these elements at zero nothing can happen
number *val_ptr = val;
const number *const end_ptr = val +
cols->sparsity_pattern.n_nonzero_elements()
const number factor_inv = 1. / factor;
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
- // multiply all elements of the matrix with
- // the given factor. this includes the
- // padding elements in chunks that overlap
- // the boundaries of the actual matrix --
- // but since multiplication with a number
- // does not violate the invariant of
- // keeping these elements at zero nothing
- // can happen
+ // multiply all elements of the matrix with the given factor. this includes
+ // the padding elements in chunks that overlap the boundaries of the actual
+ // matrix -- but since multiplication with a number does not violate the
+ // invariant of keeping these elements at zero nothing can happen
number *val_ptr = val;
const number *const end_ptr = val +
cols->sparsity_pattern.n_nonzero_elements()
Assert (m() == n(), ExcNotQuadratic());
Assert (i<m(), ExcInvalidIndex1(i));
- // Use that the first element in each row
- // of a quadratic matrix is the main
- // diagonal of the chunk sparsity pattern
- const size_type chunk_size = cols->get_chunk_size();
- return val[cols->sparsity_pattern.rowstart[i/chunk_size]
- *
- chunk_size * chunk_size
- +
- (i % chunk_size) * chunk_size
- +
- (i % chunk_size)];
- }
-
-
-
- template <typename number>
- inline
- number &ChunkSparseMatrix<number>::diag_element (const size_type i)
- {
- Assert (cols != 0, ExcNotInitialized());
- Assert (m() == n(), ExcNotQuadratic());
- Assert (i<m(), ExcInvalidIndex1(i));
-
- // Use that the first element in each row
- // of a quadratic matrix is the main
+ // Use that the first element in each row of a quadratic matrix is the main
// diagonal of the chunk sparsity pattern
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
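+  // rowstart[i/chunk_size] counts the chunks stored before this chunk row;
+  // each chunk holds chunk_size*chunk_size values, and within the diagonal
+  // chunk the diagonal entry of row i sits at local position
+  // (i%chunk_size, i%chunk_size)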
return val[cols->sparsity_pattern.rowstart[i/chunk_size]
*
chunk_size * chunk_size
ChunkSparseMatrix<number>::copy_from (const ForwardIterator begin,
const ForwardIterator end)
{
- Assert (static_cast<unsigned int>(std::distance (begin, end)) == m(),
+ Assert (static_cast<size_type>(std::distance (begin, end)) == m(),
ExcIteratorRange (std::distance (begin, end), m()));
- // for use in the inner loop, we
- // define a typedef to the type of
- // the inner iterators
+ // for use in the inner loop, we define a typedef to the type of the inner
+ // iterators
typedef typename std::iterator_traits<ForwardIterator>::value_type::const_iterator inner_iterator;
- unsigned int row=0;
+ size_type row=0;
for (ForwardIterator i=begin; i!=end; ++i, ++row)
{
const inner_iterator end_of_row = i->end();
return;
}
- // allocate not just m() * n() elements but
- // enough so that we can store full
- // chunks. this entails some padding
- // elements
+ // allocate not just m() * n() elements but enough so that we can store full
+ // chunks. this entails some padding elements
- const unsigned int chunk_size = cols->get_chunk_size();
- const unsigned int N = cols->sparsity_pattern.n_nonzero_elements() *
+ const size_type chunk_size = cols->get_chunk_size();
+ const size_type N = cols->sparsity_pattern.n_nonzero_elements() *
chunk_size * chunk_size;
if (N > max_len || max_len == 0)
{
{
Assert (cols != 0, ExcNotInitialized());
- // count those elements that are nonzero,
- // even if they lie in the padding around
- // the matrix. since we have the invariant
- // that padding elements are zero, nothing
- // bad can happen here
+ // count those elements that are nonzero, even if they lie in the padding
+ // around the matrix. since we have the invariant that padding elements are
+ // zero, nothing bad can happen here
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
return std::count_if(&val[0],
&val[cols->sparsity_pattern.n_nonzero_elements () *
chunk_size * chunk_size],
Assert (val != 0, ExcNotInitialized());
Assert (cols == matrix.cols, ExcDifferentChunkSparsityPatterns());
- // copy everything, including padding
- // elements
+ // copy everything, including padding elements
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
std::copy (&matrix.val[0],
&matrix.val[cols->sparsity_pattern.n_nonzero_elements()
* chunk_size * chunk_size],
Assert (val != 0, ExcNotInitialized());
Assert (cols == matrix.cols, ExcDifferentChunkSparsityPatterns());
- // add everything, including padding
- // elements
+ // add everything, including padding elements
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
number *val_ptr = &val[0];
const somenumber *matrix_ptr = &matrix.val[0];
const number *const end_ptr = &val[cols->sparsity_pattern.n_nonzero_elements()
Assert (!PointerComparison::equal(&src, &dst), ExcSourceEqualsDestination());
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
- // loop over all chunks. note that we need
- // to treat the last chunk row and column
- // differently if they have padding
- // elements
+ // loop over all chunks. note that we need to treat the last chunk row and
+ // column differently if they have padding elements
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
n_chunk_rows-1 :
n_chunk_rows);
- // like in vmult_add, but don't keep an
- // iterator into dst around since we're not
- // traversing it sequentially this time
+ // like in vmult_add, but don't keep an iterator into dst around since we're
+ // not traversing it sequentially this time
- const number *val_ptr = val;
- const unsigned int *colnum_ptr = cols->sparsity_pattern.colnums;
+ const number *val_ptr = val;
+ const size_type *colnum_ptr = cols->sparsity_pattern.colnums;
- for (unsigned int chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
+ for (size_type chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
{
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
src.begin() + chunk_row * cols->chunk_size,
dst.begin() + *colnum_ptr * cols->chunk_size);
else
- // we're at a chunk column that
- // has padding
+ // we're at a chunk column that has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(*colnum_ptr * cols->chunk_size + c)
+= (val_ptr[r*cols->chunk_size + c] *
src(chunk_row * cols->chunk_size + r));
}
}
- // now deal with last chunk row if
- // necessary
+ // now deal with last chunk row if necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
||
(*colnum_ptr != cols->sparsity_pattern.n_cols()-1))
{
- // we're at a chunk row but not
- // column that has padding
+ // we're at a chunk row but not column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
dst(*colnum_ptr * cols->chunk_size + c)
+= (val_ptr[r*cols->chunk_size + c] *
src(chunk_row * cols->chunk_size + r));
}
else
- // we're at a chunk row and
- // column that has padding
+ // we're at a chunk row and column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(*colnum_ptr * cols->chunk_size + c)
+= (val_ptr[r*cols->chunk_size + c] *
src(chunk_row * cols->chunk_size + r));
somenumber result = 0;
////////////////
- // like matrix_scalar_product, except that
- // the two vectors are now the same
+ // like matrix_scalar_product, except that the two vectors are now the same
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
- // loop over all chunks. note that we need
- // to treat the last chunk row and column
- // differently if they have padding
- // elements
+ // loop over all chunks. note that we need to treat the last chunk row and
+ // column differently if they have padding elements
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
v_ptr,
v.begin() + *colnum_ptr * cols->chunk_size);
else
- // we're at a chunk column that
- // has padding
+ // we're at a chunk column that has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
v(chunk_row * cols->chunk_size + r)
v_ptr += cols->chunk_size;
}
- // now deal with last chunk row if
- // necessary
+ // now deal with last chunk row if necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
||
(*colnum_ptr != cols->sparsity_pattern.n_cols()-1))
{
- // we're at a chunk row but not
- // column that has padding
+ // we're at a chunk row but not column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
result
+=
v(chunk_row * cols->chunk_size + r)
v(*colnum_ptr * cols->chunk_size + c));
}
else
- // we're at a chunk row and
- // column that has padding
+ // we're at a chunk row and column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
v(chunk_row * cols->chunk_size + r)
Assert(m() == u.size(), ExcDimensionMismatch(m(),u.size()));
Assert(n() == v.size(), ExcDimensionMismatch(n(),v.size()));
- // the following works like the vmult_add
- // function
+ // the following works like the vmult_add function
somenumber result = 0;
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
- // loop over all chunks. note that we need
- // to treat the last chunk row and column
- // differently if they have padding
- // elements
+ // loop over all chunks. note that we need to treat the last chunk row and
+ // column differently if they have padding elements
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
u_ptr,
v.begin() + *colnum_ptr * cols->chunk_size);
else
- // we're at a chunk column that
- // has padding
+ // we're at a chunk column that has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
u(chunk_row * cols->chunk_size + r)
u_ptr += cols->chunk_size;
}
- // now deal with last chunk row if
- // necessary
+ // now deal with last chunk row if necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
||
(*colnum_ptr != cols->sparsity_pattern.n_cols()-1))
{
- // we're at a chunk row but not
- // column that has padding
+ // we're at a chunk row but not column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
result
+=
u(chunk_row * cols->chunk_size + r)
v(*colnum_ptr * cols->chunk_size + c));
}
else
- // we're at a chunk row and
- // column that has padding
+ // we're at a chunk row and column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
u(chunk_row * cols->chunk_size + r)
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
- // loop over all rows and columns; it is
- // safe to also loop over the padding
- // elements (they are zero) if we make sure
- // that the vector into which we sum column
- // sums is large enough
+ // loop over all rows and columns; it is safe to also loop over the padding
+ // elements (they are zero) if we make sure that the vector into which we
+ // sum column sums is large enough
Vector<real_type> column_sums(cols->sparsity_pattern.n_cols() *
cols->chunk_size);
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- // this function works like l1_norm(). it
- // can be made more efficient (without
- // allocating a temporary vector) as is
- // done in the SparseMatrix class but since
- // it is rarely called in time critical
- // places it is probably not worth it
+ // this function works like l1_norm(). it can be made more efficient
+ // (without allocating a temporary vector) as is done in the SparseMatrix
+ // class but since it is rarely called in time critical places it is
+ // probably not worth it
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
- // loop over all rows and columns; it is
- // safe to also loop over the padding
- // elements (they are zero) if we make sure
- // that the vector into which we sum column
- // sums is large enough
+ // loop over all rows and columns; it is safe to also loop over the padding
+ // elements (they are zero) if we make sure that the vector into which we
+ // sum column sums is large enough
Vector<real_type> row_sums(cols->sparsity_pattern.n_rows() *
cols->chunk_size);
dst = b;
/////////
- // the rest of this function is like
- // vmult_add, except that we subtract
+ // the rest of this function is like vmult_add, except that we subtract
// rather than add A*u
/////////
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
- // loop over all chunks. note that we need
- // to treat the last chunk row and column
- // differently if they have padding
- // elements
+ // loop over all chunks. note that we need to treat the last chunk row and
+ // column differently if they have padding elements
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
u.begin() + *colnum_ptr * cols->chunk_size,
dst_ptr);
else
- // we're at a chunk column that
- // has padding
+ // we're at a chunk column that has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
-= (val_ptr[r*cols->chunk_size + c] *
u(*colnum_ptr * cols->chunk_size + c));
dst_ptr += cols->chunk_size;
}
- // now deal with last chunk row if
- // necessary
+ // now deal with last chunk row if necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
||
(*colnum_ptr != cols->sparsity_pattern.n_cols()-1))
{
- // we're at a chunk row but not
- // column that has padding
+ // we're at a chunk row but not column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
-= (val_ptr[r*cols->chunk_size + c] *
u(*colnum_ptr * cols->chunk_size + c));
}
else
- // we're at a chunk row and
- // column that has padding
+ // we're at a chunk row and column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
-= (val_ptr[r*cols->chunk_size + c] *
u(*colnum_ptr * cols->chunk_size + c));
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
- // loop over all chunk rows and columns,
- // and each time we find something repeat
- // it chunk_size times in both directions
+ // loop over all chunk rows and columns, and each time we find something
+ // repeat it chunk_size times in both directions
- for (unsigned int i=0; i<cols->sparsity_pattern.n_rows(); ++i)
+ for (size_type i=0; i<cols->sparsity_pattern.n_rows(); ++i)
{
- for (unsigned int d=0; d<chunk_size; ++d)
- for (unsigned int j=0; j<cols->sparsity_pattern.n_cols(); ++j)
+ for (size_type d=0; d<chunk_size; ++d)
+ for (size_type j=0; j<cols->sparsity_pattern.n_cols(); ++j)
if (cols->sparsity_pattern(i,j) == SparsityPattern::invalid_entry)
{
- for (unsigned int e=0; e<chunk_size; ++e)
+ for (size_type e=0; e<chunk_size; ++e)
out << '.';
}
else if (std::fabs(val[cols->sparsity_pattern(i,j)]) > threshold)
class ChunkSparsityPattern : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef types::global_dof_index size_type;
+ /**
+ * Typedef an iterator class that makes it possible to walk over all
+ * nonzero elements of a sparsity pattern.
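+ *
+ * A hedged sketch of a typical loop (<tt>sp</tt> being some
+ * ChunkSparsityPattern), using the begin() and end() functions declared
+ * further down in this class:
+ * @code
+ * for (ChunkSparsityPattern::const_iterator p = sp.begin(); p != sp.end(); ++p)
+ *   std::cout << p->row() << ' ' << p->column() << std::endl;
+ * @endcode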
+ */
+ typedef ChunkSparsityPatternIterators::Iterator const_iterator;
+
+ /**
+ * Typedef an iterator class that makes it possible to walk over all
+ * nonzero elements of a sparsity pattern.
+ *
+ * Since the iterator does not allow modification of the sparsity pattern,
+ * this type is the same as that for @p const_iterator.
+ */
+ typedef ChunkSparsityPatternIterators::Iterator iterator;
/**
- * Define a value which is used
- * to indicate that a certain
- * value in the colnums array
- * is unused, i.e. does not
- * represent a certain column
- * number index.
+ * Define a value which is used to indicate that a certain value in the
+ * colnums array is unused, i.e. does not represent a certain column number
+ * index.
*
- * Indices with this invalid
- * value are used to insert new
- * entries to the sparsity
- * pattern using the add() member
- * function, and are removed when
+ * Indices with this invalid value are used to insert new entries to the
+ * sparsity pattern using the add() member function, and are removed when
* calling compress().
*
- * You should not assume that the
- * variable declared here has a
- * certain value. The
- * initialization is given here
- * only to enable the compiler to
- * perform some optimizations,
- * but the actual value of the
- * variable may change over time.
+ * You should not assume that the variable declared here has a certain
+ * value. The initialization is given here only to enable the compiler to
+ * perform some optimizations, but the actual value of the variable may
+ * change over time.
*/
- static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
+ static const size_type invalid_entry = SparsityPattern::invalid_entry;
/**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
+ * Initialize the matrix empty, that is, with no memory allocated. This is
+ * useful if you want such objects as member variables in other classes. You
+ * can make the structure usable by calling the reinit() function.
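+ *
+ * A minimal sketch, using the reinit() overload declared below:
+ * @code
+ * ChunkSparsityPattern sp;   // empty, no memory allocated yet
+ * sp.reinit (m, n, max_per_row, chunk_size);
+ * @endcode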
*/
ChunkSparsityPattern ();
*
* @arg m number of rows
* @arg n number of columns
- * @arg max_per_row maximum
- * number of nonzero entries per row
+ * @arg max_per_row maximum number of nonzero entries per row
*/
- ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int max_chunks_per_row,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const size_type max_chunks_per_row,
+ const size_type chunk_size);
/**
* @deprecated This constructor is deprecated. Use the version
*
* @arg m number of rows
* @arg n number of columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
+ * @arg row_lengths possible number of nonzero entries for each row. This
+ * vector must have one entry for each row.
*/
- ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This constructor is deprecated. Use the version
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
- * Initialize a quadratic matrix
- * of dimension <tt>n</tt> with
- * at most <tt>max_per_row</tt>
- * nonzero entries per row.
+ * Initialize a quadratic matrix of dimension <tt>n</tt> with at most
+ * <tt>max_per_row</tt> nonzero entries per row.
*
- * This constructor automatically
- * enables optimized storage of
- * diagonal elements. To avoid
- * this, use the constructor
- * taking row and column numbers
- * separately.
+ * This constructor automatically enables optimized storage of diagonal
+ * elements. To avoid this, use the constructor taking row and column
+ * numbers separately.
*/
- ChunkSparsityPattern (const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size);
/**
* Initialize a quadratic matrix.
*
* @arg m number of rows and columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
+ * @arg row_lengths possible number of nonzero entries for each row. This
+ * vector must have one entry for each row.
*/
- ChunkSparsityPattern (const unsigned int m,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type m,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This constructor is deprecated. Use the version
ChunkSparsityPattern &operator = (const ChunkSparsityPattern &);
/**
- * Reallocate memory and set up data
- * structures for a new matrix with
- * <tt>m </tt>rows and <tt>n</tt> columns,
- * with at most <tt>max_per_row</tt>
+ * Reallocate memory and set up data structures for a new matrix with
+ * <tt>m</tt> rows and <tt>n</tt> columns, with at most <tt>max_per_row</tt>
* nonzero entries per row.
*
- * This function simply maps its
- * operations to the other
- * <tt>reinit</tt> function.
+ * This function simply maps its operations to the other <tt>reinit</tt>
+ * function.
*/
- void reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size);
+ void reinit (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
* @deprecated This function is deprecated. Use the function
* without the last argument
*/
- void reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
- const bool optimize_diagonal) DEAL_II_DEPRECATED;
+ void reinit (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
+ const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
- * Same as above, but with a
- * VectorSlice argument instead.
+ * Same as above, but with a VectorSlice argument instead.
*/
- void reinit (const unsigned int m,
- const unsigned int n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const unsigned int chunk_size);
+ void reinit (const size_type m,
+ const size_type n,
+ const VectorSlice<const std::vector<size_type> > &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
void compress ();
/**
- * This function can be used as a
- * replacement for reinit(),
- * subsequent calls to add() and
- * a final call to close() if you
- * know exactly in advance the
- * entries that will form the
- * matrix sparsity pattern.
- *
- * The first two parameters
- * determine the size of the
- * matrix. For the two last ones,
- * note that a sparse matrix can
- * be described by a sequence of
- * rows, each of which is
- * represented by a sequence of
- * pairs of column indices and
- * values. In the present
- * context, the begin() and
- * end() parameters designate
- * iterators (of forward iterator
- * type) into a container, one
- * representing one row. The
- * distance between begin()
- * and end() should therefore
- * be equal to
- * n_rows(). These iterators
- * may be iterators of
- * <tt>std::vector</tt>,
- * <tt>std::list</tt>, pointers into a
- * C-style array, or any other
- * iterator satisfying the
- * requirements of a forward
- * iterator. The objects pointed
- * to by these iterators
- * (i.e. what we get after
- * applying <tt>operator*</tt> or
- * <tt>operator-></tt> to one of these
- * iterators) must be a container
- * itself that provides functions
- * <tt>begin</tt> and <tt>end</tt>
- * designating a range of
- * iterators that describe the
- * contents of one
- * line. Dereferencing these
- * inner iterators must either
- * yield a pair of a size_type
- * as column index and a
- * value of arbitrary type (such
- * a type would be used if we
- * wanted to describe a sparse
- * matrix with one such object),
- * or simply a size_type
- * (of we only wanted to describe
- * a sparsity pattern). The
- * function is able to determine
- * itself whether an unsigned
- * integer or a pair is what we
- * get after dereferencing the
- * inner iterators, through some
- * template magic.
- *
- * While the order of the outer
- * iterators denotes the
- * different rows of the matrix,
- * the order of the inner
- * iterator denoting the columns
- * does not matter, as they are
- * sorted internal to this
- * function anyway.
- *
- * Since that all sounds very
- * complicated, consider the
- * following example code, which
- * may be used to fill a sparsity
- * pattern:
+ * This function can be used as a replacement for reinit(), subsequent calls
+ * to add() and a final call to compress() if you know exactly in advance the
+ * entries that will form the matrix sparsity pattern.
+ *
+ * The first two parameters determine the size of the matrix. For the last
+ * two, note that a sparse matrix can be described by a sequence of
+ * rows, each of which is represented by a sequence of pairs of column
+ * indices and values. In the present context, the begin() and end()
+ * parameters designate iterators (of forward iterator type) into a
+ * container, one representing one row. The distance between begin() and
+ * end() should therefore be equal to n_rows(). These iterators may be
+ * iterators of <tt>std::vector</tt>, <tt>std::list</tt>, pointers into a
+ * C-style array, or any other iterator satisfying the requirements of a
+ * forward iterator. The objects pointed to by these iterators (i.e. what we
+ * get after applying <tt>operator*</tt> or <tt>operator-></tt> to one of
+ * these iterators) must be a container itself that provides functions
+ * <tt>begin</tt> and <tt>end</tt> designating a range of iterators that
+ * describe the contents of one line. Dereferencing these inner iterators
+ * must either yield a pair of a size_type as column index and a value of
+ * arbitrary type (such a type would be used if we wanted to describe a
+ * sparse matrix with one such object), or simply a size_type (if we only
+ * wanted to describe a sparsity pattern). The function is able to determine
+ * itself whether a size_type or a pair is what we get after dereferencing
+ * the inner iterators, through some template magic.
+ *
+ * While the order of the outer iterators denotes the different rows of the
+ * matrix, the order of the inner iterators denoting the columns does not
+ * matter, as they are sorted internally by this function anyway.
+ *
+ * Since that all sounds very complicated, consider the following example
+ * code, which may be used to fill a sparsity pattern:
* @code
- * std::vector<std::vector<unsigned int> > column_indices (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
+ * std::vector<std::vector<size_type> > column_indices (n_rows);
+ * for (size_type row=0; row<n_rows; ++row)
* // generate necessary columns in this row
* fill_row (column_indices[row]);
*
* column_indices.end());
* @endcode
*
- * Note that this example works
- * since the iterators
- * dereferenced yield containers
- * with functions <tt>begin</tt> and
- * <tt>end</tt> (namely
- * <tt>std::vector</tt>s), and the
- * inner iterators dereferenced
- * yield size_type as
- * column indices. Note that we
- * could have replaced each of
- * the two <tt>std::vector</tt>
- * occurrences by <tt>std::list</tt>,
- * and the inner one by
- * <tt>std::set</tt> as well.
- *
- * Another example would be as
- * follows, where we initialize a
- * whole matrix, not only a
- * sparsity pattern:
+ * Note that this example works since the iterators dereferenced yield
+ * containers with functions <tt>begin</tt> and <tt>end</tt> (namely
+ * <tt>std::vector</tt>s), and the inner iterators dereferenced yield
+ * size_type values as column indices. Note that we could have replaced
+ * each of the two <tt>std::vector</tt> occurrences by <tt>std::list</tt>,
+ * and the inner one by <tt>std::set</tt> as well.
+ *
+ * Another example would be as follows, where we initialize a whole matrix,
+ * not only a sparsity pattern:
* @code
- * std::vector<std::map<unsigned int,double> > entries (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
+ * std::vector<std::map<size_type,double> > entries (n_rows);
+ * for (size_type row=0; row<n_rows; ++row)
* // generate necessary pairs of columns
* // and corresponding values in this row
* fill_row (entries[row]);
* column_indices.end());
* @endcode
*
- * This example works because
- * dereferencing iterators of the
- * inner type yields a pair of
- * size_type and a value,
- * the first of which we take as
- * column index. As previously,
- * the outer <tt>std::vector</tt>
- * could be replaced by
- * <tt>std::list</tt>, and the inner
- * <tt>std::map<size_type,double></tt>
- * could be replaced by
- * <tt>std::vector<std::pair<size_type,double> ></tt>,
- * or a list or set of such
- * pairs, as they all return
- * iterators that point to such
- * pairs.
+ * This example works because dereferencing iterators of the inner type
+ * yields a pair of a size_type and a value, the first of which we take as
+ * the column index. As previously, the outer <tt>std::vector</tt> could be
+ * replaced by <tt>std::list</tt>, and the inner
+ * <tt>std::map<size_type,double></tt> could be replaced by
+ * <tt>std::vector<std::pair<size_type,double> ></tt>, or a list or set of
+ * such pairs, as they all return iterators that point to such pairs.
*/
template <typename ForwardIterator>
- void copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
+ void copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size);
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
bool empty () const;
/**
- * Return the chunk size given as
- * argument when constructing this
- * object.
+ * Return the chunk size given as argument when constructing this object.
*/
- unsigned int get_chunk_size () const;
+ size_type get_chunk_size () const;
/**
- * Return the maximum number of entries per
- * row. Before compression, this equals the
- * number given to the constructor, while
- * after compression, it equals the maximum
- * number of entries actually allocated by
- * the user.
+ * Return the maximum number of entries per row. Before compression, this
+ * equals the number given to the constructor, while after compression, it
+ * equals the maximum number of entries actually allocated by the user.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
- * Add a nonzero entry to the matrix.
- * This function may only be called
- * for non-compressed sparsity patterns.
+ * Add a nonzero entry to the matrix. This function may only be called for
+ * non-compressed sparsity patterns.
*
- * If the entry already exists, nothing
- * bad happens.
+ * If the entry already exists, nothing bad happens.
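+ *
+ * For example (hedged sketch; the sizes are placeholders):
+ * @code
+ * ChunkSparsityPattern sp (m, n, max_chunks_per_row, chunk_size);
+ * sp.add (2, 3);   // declare entry (2,3) as potentially nonzero
+ * sp.compress ();  // no further add() calls after this
+ * @endcode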
*/
- void add (const unsigned int i,
- const unsigned int j);
+ void add (const size_type i,
+ const size_type j);
/**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
+ * Make the sparsity pattern symmetric by adding the sparsity pattern of the
* transpose object.
*
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * quadratic matrix.
+ * This function throws an exception if the sparsity pattern does not
+ * represent a quadratic matrix.
*/
void symmetrize ();
/**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
+ * Return the number of rows of this matrix, which equals the dimension of
+ * the image space.
*/
- unsigned int n_rows () const;
+ inline size_type n_rows () const;
/**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
+ * Return the number of columns of this matrix, which equals the dimension
+ * of the domain space.
*/
- unsigned int n_cols () const;
+ inline size_type n_cols () const;
/**
- * Check if a value at a certain
- * position may be non-zero.
+ * Check if a value at a certain position may be non-zero.
*/
- bool exists (const unsigned int i,
- const unsigned int j) const;
+ bool exists (const size_type i,
+ const size_type j) const;
/**
* Number of entries in a specific row.
*/
- unsigned int row_length (const unsigned int row) const;
+ size_type row_length (const size_type row) const;
/**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of $|i-j|$
- * for which the index pair $(i,j)$
- * represents a nonzero entry of the
- * matrix. Consequently, the maximum
- * bandwidth a $n\times m$ matrix can
- * have is $\max\{n-1,m-1\}$.
+ * Compute the bandwidth of the matrix represented by this structure. The
+ * bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$
+ * represents a nonzero entry of the matrix. Consequently, the maximum
+ * bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.
*/
- unsigned int bandwidth () const;
+ size_type bandwidth () const;
/**
- * Return the number of nonzero elements of
- * this matrix. Actually, it returns the
- * number of entries in the sparsity
- * pattern; if any of the entries should
- * happen to be zero, it is counted
- * anyway.
+ * Return the number of nonzero elements of this matrix. Actually, it
+ * returns the number of entries in the sparsity pattern; if any of the
+ * entries should happen to be zero, it is counted anyway.
*
- * This function may only be called if the
- * matrix struct is compressed. It does not
- * make too much sense otherwise anyway.
+ * This function may only be called if the matrix struct is compressed. It
+ * does not make too much sense otherwise anyway.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
- * Return whether the structure is
- * compressed or not.
+ * Return whether the structure is compressed or not.
*/
bool is_compressed () const;
//@}
private:
/**
- * Number of rows that this sparsity
- * structure shall represent.
+ * Number of rows that this sparsity structure shall represent.
*/
- unsigned int rows;
+ size_type rows;
/**
- * Number of columns that this sparsity
- * structure shall represent.
+ * Number of columns that this sparsity structure shall represent.
*/
- unsigned int cols;
+ size_type cols;
/**
* The size of chunks.
*/
- unsigned int chunk_size;
+ size_type chunk_size;
/**
- * The reduced sparsity pattern. We store
- * only which chunks exist, with each
- * chunk a block in the matrix of size
- * chunk_size by chunk_size.
+ * The reduced sparsity pattern. We store only which chunks exist, with each
+ * chunk a block in the matrix of size chunk_size by chunk_size.
*/
SparsityPattern sparsity_pattern;
#ifndef DOXYGEN
+ namespace ChunkSparsityPatternIterators
+ {
+ inline
+ Accessor::
+ Accessor (const ChunkSparsityPattern *sparsity_pattern,
+ const unsigned int row)
+ :
+ sparsity_pattern(sparsity_pattern),
+ reduced_accessor(row==sparsity_pattern->n_rows() ?
+ *sparsity_pattern->sparsity_pattern.end() :
+ *sparsity_pattern->sparsity_pattern.
+ begin(row/sparsity_pattern->get_chunk_size())),
+ chunk_row (row==sparsity_pattern->n_rows() ? 0 :
+ row%sparsity_pattern->get_chunk_size()),
+ chunk_col (0)
+ {}
+
+
+
+ inline
+ Accessor::
+ Accessor (const ChunkSparsityPattern *sparsity_pattern)
+ :
+ sparsity_pattern(sparsity_pattern),
+ reduced_accessor(*sparsity_pattern->sparsity_pattern.end()),
+ chunk_row (0),
+ chunk_col (0)
+ {}
+
+
+
+ inline
+ bool
+ Accessor::is_valid_entry () const
+ {
+ return reduced_accessor.is_valid_entry()
+ &&
+ sparsity_pattern->get_chunk_size()*reduced_accessor.row()+chunk_row <
+ sparsity_pattern->n_rows()
+ &&
+ sparsity_pattern->get_chunk_size()*reduced_accessor.column()+chunk_col <
+ sparsity_pattern->n_cols();
+ }
+
+
+
+ inline
+ unsigned int
+ Accessor::row() const
+ {
+ Assert (is_valid_entry() == true, ExcInvalidIterator());
+
+ return sparsity_pattern->get_chunk_size()*reduced_accessor.row() +
+ chunk_row;
+ }
+
+
+
+ inline
+ unsigned int
+ Accessor::column() const
+ {
+ Assert (is_valid_entry() == true, ExcInvalidIterator());
+
+ return sparsity_pattern->get_chunk_size()*reduced_accessor.column() +
+ chunk_col;
+ }
+
+
+
+ inline
+ std::size_t
+ Accessor::reduced_index() const
+ {
+ Assert (is_valid_entry() == true, ExcInvalidIterator());
+
+ return reduced_accessor.index_within_sparsity;
+ }
+
+
+
+
+ inline
+ bool
+ Accessor::operator == (const Accessor &other) const
+ {
+ // no need to check for equality of sparsity patterns as this is done in
+ // the reduced case already and every ChunkSparsityPattern has its own
+ // reduced sparsity pattern
+ return (reduced_accessor == other.reduced_accessor &&
+ chunk_row == other.chunk_row &&
+ chunk_col == other.chunk_col);
+ }
+
+
+
+ inline
+ bool
+ Accessor::operator < (const Accessor &other) const
+ {
+ Assert (sparsity_pattern == other.sparsity_pattern,
+ ExcInternalError());
+
+ if (chunk_row != other.chunk_row)
+ {
+ if (reduced_accessor.index_within_sparsity ==
+ reduced_accessor.sparsity_pattern->n_nonzero_elements())
+ return false;
+ if (other.reduced_accessor.index_within_sparsity ==
+ reduced_accessor.sparsity_pattern->n_nonzero_elements())
+ return true;
+
+ const unsigned int
+ global_row = sparsity_pattern->get_chunk_size()*reduced_accessor.row()
+ +chunk_row,
+ other_global_row = sparsity_pattern->get_chunk_size()*
+ other.reduced_accessor.row()+other.chunk_row;
+ if (global_row < other_global_row)
+ return true;
+ else if (global_row > other_global_row)
+ return false;
+ }
+
+ return (reduced_accessor.index_within_sparsity <
+ other.reduced_accessor.index_within_sparsity ||
+ (reduced_accessor.index_within_sparsity ==
+ other.reduced_accessor.index_within_sparsity &&
+ chunk_col < other.chunk_col));
+ }
+
+
+ inline
+ void
+ Accessor::advance ()
+ {
+ const unsigned int chunk_size = sparsity_pattern->get_chunk_size();
+ Assert (chunk_row < chunk_size && chunk_col < chunk_size,
+ ExcIteratorPastEnd());
+ Assert (reduced_accessor.row() * chunk_size + chunk_row <
+ sparsity_pattern->n_rows()
+ &&
+ reduced_accessor.column() * chunk_size + chunk_col <
+ sparsity_pattern->n_cols(),
+ ExcIteratorPastEnd());
+ if (chunk_size == 1)
+ {
+ reduced_accessor.advance();
+ return;
+ }
+
+ ++chunk_col;
+
+ // end of chunk
+ if (chunk_col == chunk_size
+ ||
+ reduced_accessor.column() * chunk_size + chunk_col ==
+ sparsity_pattern->n_cols())
+ {
+ const unsigned int reduced_row = reduced_accessor.row();
+ // end of row
+ if (reduced_accessor.index_within_sparsity + 1 ==
+ reduced_accessor.sparsity_pattern->rowstart[reduced_row+1])
+ {
+ ++chunk_row;
+
+ chunk_col = 0;
+
+ // end of chunk rows or end of matrix
+ if (chunk_row == chunk_size ||
+ (reduced_row * chunk_size + chunk_row ==
+ sparsity_pattern->n_rows()))
+ {
+ chunk_row = 0;
+ reduced_accessor.advance();
+ }
+ // go back to the beginning of the same reduced row but with
+ // chunk_row increased by one
+ else
+ reduced_accessor.index_within_sparsity =
+ reduced_accessor.sparsity_pattern->rowstart[reduced_row];
+ }
+ // advance within chunk
+ else
+ {
+ reduced_accessor.advance();
+ chunk_col = 0;
+ }
+ }
+ }
+
+
+
+ inline
+ Iterator::Iterator (const ChunkSparsityPattern *sparsity_pattern,
+ const unsigned int row)
+ :
+ accessor(sparsity_pattern, row)
+ {}
+
+
+
+ inline
+ Iterator &
+ Iterator::operator++ ()
+ {
+ accessor.advance ();
+ return *this;
+ }
+
+
+
+ inline
+ Iterator
+ Iterator::operator++ (int)
+ {
+ const Iterator iter = *this;
+ accessor.advance ();
+ return iter;
+ }
+
+
+
+ inline
+ const Accessor &
+ Iterator::operator* () const
+ {
+ return accessor;
+ }
+
+
+
+ inline
+ const Accessor *
+ Iterator::operator-> () const
+ {
+ return &accessor;
+ }
+
+
+ inline
+ bool
+ Iterator::operator == (const Iterator &other) const
+ {
+ return (accessor == other.accessor);
+ }
+
+
+
+ inline
+ bool
+ Iterator::operator != (const Iterator &other) const
+ {
+ return ! (accessor == other.accessor);
+ }
+
+
+ inline
+ bool
+ Iterator::operator < (const Iterator &other) const
+ {
+ return accessor < other.accessor;
+ }
+
+ }
+
+
+
+ inline
+ ChunkSparsityPattern::iterator
+ ChunkSparsityPattern::begin () const
+ {
+ return iterator(this, 0);
+ }
+
+
+ inline
+ ChunkSparsityPattern::iterator
+ ChunkSparsityPattern::end () const
+ {
+ return iterator(this, n_rows());
+ }
+
+
+
+ inline
+ ChunkSparsityPattern::iterator
+ ChunkSparsityPattern::begin (const unsigned int r) const
+ {
+ Assert (r<n_rows(), ExcIndexRange(r,0,n_rows()));
+ return iterator(this, r);
+ }
+
+
+
+ inline
+ ChunkSparsityPattern::iterator
+ ChunkSparsityPattern::end (const unsigned int r) const
+ {
+ Assert (r<n_rows(), ExcIndexRange(r,0,n_rows()));
+ return iterator(this, r+1);
+ }
+
+
inline
-unsigned int
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::n_rows () const
{
return rows;
template <typename ForwardIterator>
+ inline
void
-ChunkSparsityPattern::copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
+ChunkSparsityPattern::copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool)
{
copy_from (n_rows, n_cols, begin, end, chunk_size);
template <typename ForwardIterator>
void
-ChunkSparsityPattern::copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
+ChunkSparsityPattern::copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size)
+ const size_type chunk_size)
{
- Assert (static_cast<unsigned int>(std::distance (begin, end)) == n_rows,
+ Assert (static_cast<size_type>(std::distance (begin, end)) == n_rows,
ExcIteratorRange (std::distance (begin, end), n_rows));
- // first determine row lengths for
- // each row. if the matrix is
- // quadratic, then we might have to
- // add an additional entry for the
- // diagonal, if that is not yet
- // present. as we have to call
- // compress anyway later on, don't
- // bother to check whether that
- // diagonal entry is in a certain
- // row or not
+ // first determine row lengths for each row. if the matrix is quadratic,
+ // then we might have to add an additional entry for the diagonal, if that
+ // is not yet present. as we have to call compress anyway later on, don't
+ // bother to check whether that diagonal entry is in a certain row or not
const bool is_square = (n_rows == n_cols);
- std::vector<unsigned int> row_lengths;
+ std::vector<size_type> row_lengths;
row_lengths.reserve(n_rows);
for (ForwardIterator i=begin; i!=end; ++i)
row_lengths.push_back (std::distance (i->begin(), i->end()) +
(is_square ? 1 : 0));
reinit (n_rows, n_cols, row_lengths, chunk_size);
- // now enter all the elements into
- // the matrix
+ // now enter all the elements into the matrix
- unsigned int row = 0;
+ size_type row = 0;
typedef typename std::iterator_traits<ForwardIterator>::value_type::const_iterator inner_iterator;
for (ForwardIterator i=begin; i!=end; ++i, ++row)
{
const inner_iterator end_of_row = i->end();
for (inner_iterator j=i->begin(); j!=end_of_row; ++j)
{
- const unsigned int col
+ const size_type col
- = internal::SparsityPatternTools::get_column_index_from_iterator(*j);
+ = internal::SparsityPatternTools::get_column_index_from_iterator(*j);
Assert (col < n_cols, ExcInvalidIndex(col,n_cols));
add (row, col);
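As a usage sketch for the templated copy_from() above (illustrative only; all names are made up): every element of the iterator range represents one row and stores the column indices of that row, so a vector of vectors works directly:

    #include <deal.II/lac/chunk_sparsity_pattern.h>
    #include <vector>

    using namespace dealii;

    void build_pattern ()
    {
      // sparsity of a 3x4 matrix; inner vectors list nonzero columns per row
      std::vector<std::vector<unsigned int> > columns (3);
      columns[0].push_back (0);
      columns[0].push_back (2);
      columns[1].push_back (1);
      columns[2].push_back (3);

      ChunkSparsityPattern pattern;
      pattern.copy_from (3, 4,
                         columns.begin(), columns.end(),
                         2 /*chunk_size*/);
      // no explicit compress() call needed here: per the comment above,
      // copy_from() compresses the pattern itself
    }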
* are owned locally and for indices not
* present at all.
*/
- bool is_ghost_entry (const types::global_dof_index global_index) const;
+ bool is_ghost_entry (const size_type global_index) const;
/**
- * Make the @p Vector class a bit like
- * the <tt>vector<></tt> class of the C++
- * standard library by returning
- * iterators to the start and end of the
- * locally owned elements of this vector.
+ * Make the @p Vector class a bit like the <tt>vector<></tt> class of
+ * the C++ standard library by returning iterators to the start and end
+ * of the <i>locally owned</i> elements of this vector.
+ *
+ * It holds that end() - begin() == local_size().
*/
iterator begin ();
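A small sketch of what this enables (assuming a vector of doubles whose iterators are plain pointers into the local array; `vec` is illustrative and assumed initialized):

    // scale every locally owned element in place; the loop touches
    // exactly local_size() elements since end() - begin() == local_size()
    for (double *p = vec.begin(); p != vec.end(); ++p)
      *p *= 2.;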
* elements of this
* matrix.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
+ * Matrix-vector multiplication: let $dst = M*src$ with $M$ being this
+ * matrix. The vector types can be block vectors or non-block vectors
+ * (only if the matrix has only one row or column, respectively), and need
+ * to define TrilinosWrappers::SparseMatrix::vmult.
*/
- void vmult (MPI::BlockVector &dst,
- const MPI::BlockVector &src) const;
-
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix, now applied
- * to localized block vectors
- * (works only when run on one
- * processor).
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (MPI::BlockVector &dst,
- const MPI::Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column, now
- * applied to localized vectors
- * (works only when run on one
- * processor).
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (MPI::Vector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row, now
- * applied to localized vectors
- * (works only when run on one
- * processor).
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (MPI::BlockVector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix, now applied
- * to localized Trilinos vectors
- * (works only when run on one
- * processor).
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (MPI::BlockVector &dst,
- const MPI::Vector &src) const;
+ template <typename VectorType1, typename VectorType2>
+ void vmult (VectorType1 &dst,
+ const VectorType2 &src) const;
/**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row, now
- * applied to localized Trilinos
- * vectors (works only when run
- * on one processor).
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (MPI::Vector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column, now
- * applied to localized Trilinos
- * vectors (works only when run
- * on one processor).
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
+ * Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this
+ * matrix. This function does the same as vmult() but takes the transposed
+ * matrix.
*/
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const;
+ template <typename VectorType1, typename VectorType2>
+ void Tvmult (VectorType1 &dst,
+ const VectorType2 &src) const;
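A sketch of calling the consolidated interface (not part of the patch; header names and setup are assumptions, and the vectors must be partitioned compatibly with the matrix):

    #include <deal.II/lac/trilinos_block_sparse_matrix.h>
    #include <deal.II/lac/trilinos_block_vector.h>

    using namespace dealii;

    void multiply (const TrilinosWrappers::BlockSparseMatrix &matrix,
                   const TrilinosWrappers::MPI::BlockVector  &u,
                   TrilinosWrappers::MPI::BlockVector        &v,
                   TrilinosWrappers::MPI::BlockVector        &w)
    {
      matrix.vmult  (v, u);   // v = M   * u
      matrix.Tvmult (w, v);   // w = M^T * v
      // a non-block vector is accepted on either side whenever the matrix
      // has only one block row or column, respectively
    }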
/**
* Compute the residual of an
last_action = Insert;
- int *col_index_ptr;
+ TrilinosWrappers::types::int_type *col_index_ptr;
- TrilinosScalar const *col_value_ptr;
+ TrilinosScalar *col_value_ptr;
- int n_columns;
+ TrilinosWrappers::types::int_type n_columns;
- // If we don't elide zeros, the pointers
- // are already available...
+ TrilinosScalar short_val_array[100];
+ int short_index_array[100];
+ std::vector<TrilinosScalar> long_val_array;
+ std::vector<int> long_index_array;
+
+
+ // If we don't elide zeros, the pointers are already available... need to
+ // cast to non-const pointers as that is the format taken by Trilinos (but
+ // we will not modify const data)
if (elide_zero_values == false)
{
- col_index_ptr = (int *)col_indices;
+ col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
- col_value_ptr = values;
+ col_value_ptr = const_cast<TrilinosScalar*>(values);
n_columns = n_cols;
}
else
}
}
- Assert(n_columns <= (int)n_cols, ExcInternalError());
+ Assert(n_columns <= (TrilinosWrappers::types::int_type)n_cols, ExcInternalError());
-
- col_index_ptr = (TrilinosWrappers::types::int_type *)&column_indices[0];
- col_value_ptr = &column_values[0];
}
- // If the calling matrix owns the row to
- // which we want to insert values, we
- // can directly call the Epetra_CrsMatrix
- // input function, which is much faster
- // than the Epetra_FECrsMatrix
- // function. We distinguish between two
- // cases: the first one is when the matrix
- // is not filled (i.e., it is possible to
- // add new elements to the sparsity pattern),
- // and the second one is when the pattern is
- // already fixed. In the former case, we
- // add the possibility to insert new values,
- // and in the second we just replace
+ // If the calling matrix owns the row to which we want to insert values,
+ // we can directly call the Epetra_CrsMatrix input function, which is much
+ // faster than the Epetra_FECrsMatrix function. We distinguish between two
+ // cases: the first one is when the matrix is not filled (i.e., it is
+ // possible to add new elements to the sparsity pattern), and the second
+ // one is when the pattern is already fixed. In the former case, we add
+ // the possibility to insert new values, and in the second we just replace
// data.
- if (row_partitioner().MyGID(static_cast<int>(row)) == true)
+ if (row_partitioner().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
{
if (matrix->Filled() == false)
{
- ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(row, n_columns,
- col_value_ptr,
- col_index_ptr);
+ ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
+ static_cast<TrilinosWrappers::types::int_type>(row),
+ static_cast<int>(n_columns), const_cast<double *>(col_value_ptr),
+ col_index_ptr);
- // When inserting elements, we do
- // not want to create exceptions in
- // the case when inserting non-local
- // data (since that's what we want
- // to do right now).
+ // When inserting elements, we do not want to create exceptions in
+ // the case when inserting non-local data (since that's what we
+ // want to do right now).
if (ierr > 0)
ierr = 0;
}
last_action = Add;
- int *col_index_ptr;
+ TrilinosWrappers::types::int_type *col_index_ptr;
- TrilinosScalar const *col_value_ptr;
+ TrilinosScalar *col_value_ptr;
- int n_columns;
+ TrilinosWrappers::types::int_type n_columns;
- // If we don't elide zeros, the pointers
- // are already available...
+ TrilinosScalar short_val_array[100];
+ int short_index_array[100];
+ std::vector<TrilinosScalar> long_val_array;
+ std::vector<int> long_index_array;
+
+ // If we don't elide zeros, the pointers are already available... need to
+ // cast to non-const pointers as that is the format taken by Trilinos (but
+ // we will not modify const data)
if (elide_zero_values == false)
{
- col_index_ptr = (int *)col_indices;
+ col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
- col_value_ptr = values;
+ col_value_ptr = const_cast<TrilinosScalar*>(values);
n_columns = n_cols;
#ifdef DEBUG
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
Assert (numbers::is_finite(values[j]), ExcNumberNotFinite());
#endif
}
}
n_columns = 0;
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
{
const double value = values[j];
+
Assert (numbers::is_finite(value), ExcNumberNotFinite());
if (value != 0)
{
}
}
- Assert(n_columns <= (int)n_cols, ExcInternalError());
+ Assert(n_columns <= (TrilinosWrappers::types::int_type)n_cols, ExcInternalError());
- col_index_ptr = (TrilinosWrappers::types::int_type *)&column_indices[0];
- col_value_ptr = &column_values[0];
}
- // If the calling matrix owns the row to
- // which we want to add values, we
- // can directly call the Epetra_CrsMatrix
- // input function, which is much faster
- // than the Epetra_FECrsMatrix function.
+ // If the calling matrix owns the row to which we want to add values, we
+ // can directly call the Epetra_CrsMatrix input function, which is much
+ // faster than the Epetra_FECrsMatrix function.
- if (row_partitioner().MyGID(static_cast<int>(row)) == true)
+ if (row_partitioner().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
{
ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row, n_columns,
- const_cast<double *>(col_value_ptr),
+ col_value_ptr,
col_index_ptr);
}
else
{
- // When we're at off-processor data, we
- // have to stick with the standard
- // SumIntoGlobalValues
- // function. Nevertheless, the way we
- // call it is the fastest one (any other
- // will lead to repeated allocation and
- // deallocation of memory in order to
- // call the function we already use,
- // which is very unefficient if writing
- // one element at a time).
+ // When we're at off-processor data, we have to stick with the
+ // standard SumIntoGlobalValues function. Nevertheless, the way we
+ // call it is the fastest one (any other will lead to repeated
+ // allocation and deallocation of memory in order to call the function
+ // we already use, which is very inefficient if writing one element at
+ // a time).
compressed = false;
- ierr = matrix->SumIntoGlobalValues (1, (int *)&row, n_columns,
+ ierr = matrix->SumIntoGlobalValues (1,
+ (TrilinosWrappers::types::int_type *)&row, n_columns,
col_index_ptr,
&col_value_ptr,
Epetra_FECrsMatrix::ROW_MAJOR);
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
- const int n_global_elements = matrix->DomainMap().NumGlobalElements();
- #else
- const long long int n_global_elements = matrix->DomainMap().NumGlobalElements64();
- #endif
-
- AssertDimension (static_cast<size_type>(matrix->DomainMap().NumMyElements()),
- static_cast<size_type>(n_global_elements));
- AssertDimension (dst.size(), static_cast<size_type>(matrix->RangeMap().NumMyElements()));
- AssertDimension (src.size(), static_cast<size_type>(matrix->DomainMap().NumMyElements()));
+ internal::SparseMatrix::check_vector_map_equality(*matrix, src, dst);
- const int dst_local_size = dst.end() - dst.begin();
- AssertDimension (dst_local_size, matrix->RangeMap().NumMyElements());
- const int src_local_size = src.end() - src.begin();
- AssertDimension (src_local_size, matrix->DomainMap().NumMyElements());
+ const size_type dst_local_size = dst.end() - dst.begin();
+ AssertDimension (dst_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
+ const size_type src_local_size = src.end() - src.begin();
+ AssertDimension (src_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
Epetra_Vector tril_dst (View, matrix->RangeMap(), dst.begin());
Epetra_Vector tril_src (View, matrix->DomainMap(),
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- #ifndef DEAL_II_USE_LARGE_INDEX_TYPE
- const int n_global_elements = matrix->DomainMap().NumGlobalElements();
- #else
- const long long int n_global_elements = matrix->DomainMap().NumGlobalElements64();
- #endif
-
- AssertDimension (static_cast<size_type>(matrix->DomainMap().NumMyElements()),
- static_cast<size_type>(n_global_elements));
- AssertDimension (dst.size(), static_cast<size_type>(matrix->DomainMap().NumMyElements()));
- AssertDimension (src.size(), static_cast<size_type>(matrix->RangeMap().NumMyElements()));
+ internal::SparseMatrix::check_vector_map_equality(*matrix, dst, src);
- const int dst_local_size = dst.end() - dst.begin();
- AssertDimension (dst_local_size, matrix->DomainMap().NumMyElements());
- const int src_local_size = src.end() - src.begin();
- AssertDimension (src_local_size, matrix->RangeMap().NumMyElements());
+ const size_type dst_local_size = dst.end() - dst.begin();
+ AssertDimension (dst_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
+ const size_type src_local_size = src.end() - src.begin();
+ AssertDimension (src_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
Epetra_Vector tril_dst (View, matrix->DomainMap(), dst.begin());
Epetra_Vector tril_src (View, matrix->RangeMap(),
* <tt>C</tt> standard libraries
* <tt>vector<...></tt> class.
*/
- typedef TrilinosScalar value_type;
- typedef TrilinosScalar real_type;
- typedef std::size_t size_type;
- typedef internal::VectorReference reference;
+ typedef TrilinosScalar value_type;
+ typedef TrilinosScalar real_type;
+ typedef dealii::types::global_dof_index size_type;
+ typedef value_type *iterator;
+ typedef const value_type *const_iterator;
+ typedef internal::VectorReference reference;
typedef const internal::VectorReference const_reference;
/**
* elements sits on another
* process.
*/
- TrilinosScalar el (const unsigned int index) const;
+ TrilinosScalar el (const size_type index) const;
+ /**
+ * Make the Vector class a bit like the <tt>vector<></tt> class of
+ * the C++ standard library by returning iterators to the start and end
+ * of the locally owned elements of this vector. The ordering of local
+ * elements corresponds to the one given by the global indices.
+ *
+ * It holds that end() - begin() == local_size().
+ */
+ iterator begin ();
+
+ /**
+ * Return constant iterator to the start of the locally owned elements
+ * of the vector.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Return an iterator pointing to the element past the end of the array
+ * of locally owned entries.
+ */
+ iterator end ();
+
+ /**
+ * Return a constant iterator pointing to the element past the end of
+ * the array of the locally owned entries.
+ */
+ const_iterator end () const;
+
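And the matching const access, as a sketch (`vec` is illustrative; the typedefs above make const_iterator a plain pointer to const TrilinosScalar):

    // accumulate the locally owned entries of a Trilinos vector
    TrilinosScalar local_sum = 0.;
    for (TrilinosWrappers::VectorBase::const_iterator p = vec.begin();
         p != vec.end(); ++p)
      local_sum += *p;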
/**
* A collective set operation:
* instead of setting individual
"to imposing Dirichlet values on the vector-valued "
"quantity."));
- std::vector<unsigned int> face_dofs;
+ std::vector<types::global_dof_index> face_dofs;
- // create FE and mapping
- // collections for all elements in
- // use by this DoFHandler
+ // create FE and mapping collections for all elements in use by this
+ // DoFHandler
hp::FECollection<dim,spacedim> fe_collection (dof_handler.get_fe());
hp::MappingCollection<dim,spacedim> mapping_collection;
for (unsigned int i=0; i<fe_collection.size(); ++i)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
- // simply map this function to the
- // other @p{reinit} function
+ // simply map this function to the other @p{reinit} function
- const std::vector<unsigned int> row_lengths (m, max_per_row);
+ const std::vector<size_type> row_lengths (m, max_per_row);
reinit (m, n, row_lengths, chunk_size);
}
this->chunk_size = chunk_size;
- // pass down to the necessary information
- // to the underlying object. we need to
- // calculate how many chunks we need: we
- // need to round up (m/chunk_size) and
- // (n/chunk_size). rounding up in integer
- // arithmetic equals
+ // pass down the necessary information to the underlying object. we need
+ // to calculate how many chunks we need: we need to round up (m/chunk_size)
+ // and (n/chunk_size). rounding up in integer arithmetic equals
// ((m+chunk_size-1)/chunk_size):
- const unsigned int m_chunks = (m+chunk_size-1) / chunk_size,
- n_chunks = (n+chunk_size-1) / chunk_size;
+ const size_type m_chunks = (m+chunk_size-1) / chunk_size,
+ n_chunks = (n+chunk_size-1) / chunk_size;
- // compute the maximum number of chunks in
- // each row. the passed array denotes the
- // number of entries in each row of the big
- // matrix -- in the worst case, these are
- // all in independent chunks, so we have to
- // calculate it as follows (as an example:
- // let chunk_size==2,
- // row_lengths={2,2,...}, and entries in
- // row zero at columns {0,2} and for row
- // one at {4,6} --> we'll need 4 chunks for
- // the first chunk row!) :
+ // compute the maximum number of chunks in each row. the passed array
+ // denotes the number of entries in each row of the big matrix -- in the
+ // worst case, these are all in independent chunks, so we have to calculate
+ // it as follows (as an example: let chunk_size==2, row_lengths={2,2,...},
+ // and entries in row zero at columns {0,2} and for row one at {4,6} -->
+ // we'll need 4 chunks for the first chunk row!) :
std::vector<unsigned int> chunk_row_lengths (m_chunks, 0);
- for (unsigned int i=0; i<m; ++i)
+ for (size_type i=0; i<m; ++i)
chunk_row_lengths[i/chunk_size] += row_lengths[i];
+ // for the case that the reduced sparsity pattern optimizes the diagonal but
+ // the actual sparsity pattern does not, need to take one more entry in the
+ // row to fit the user-required entry
+ if (m != n && m_chunks == n_chunks)
+ for (unsigned int i=0; i<m_chunks; ++i)
+ ++chunk_row_lengths[i];
+
sparsity_pattern.reinit (m_chunks,
n_chunks,
chunk_row_lengths);
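For concreteness (a hand-worked check, not part of the patch): m=10, n=11, chunk_size=4 gives m_chunks=(10+3)/4=3 and n_chunks=(11+3)/4=3, so a 10x11 pattern is stored as a 3x3 pattern of 4x4 chunks with padding at the right and bottom edges.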
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
- // count number of entries per row, then
- // initialize the underlying sparsity
+ // count number of entries per row, then initialize the underlying sparsity
// pattern
- std::vector<unsigned int> entries_per_row (csp.n_rows(), 0);
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
+ std::vector<size_type> entries_per_row (csp.n_rows(), 0);
+ for (size_type row = 0; row<csp.n_rows(); ++row)
entries_per_row[row] = csp.row_length(row);
reinit (csp.n_rows(), csp.n_cols(),
- void
- ChunkSparsityPattern::symmetrize ()
-unsigned int
-ChunkSparsityPattern::row_length (const unsigned int i) const
+ChunkSparsityPattern::size_type
+ChunkSparsityPattern::row_length (const size_type i) const
{
- // matrix must be square. note that the for
- // some matrix sizes, the current sparsity
- // pattern may not be square even if the
- // underlying sparsity pattern is (e.g. a
- // 10x11 matrix with chunk_size 4)
- Assert (rows==cols, ExcNotQuadratic());
+ Assert (i<rows, ExcIndexRange(i,0,rows));
- sparsity_pattern.symmetrize ();
+ // find out if we did padding and if this row is affected by it
+ if (n_cols() % chunk_size == 0)
+ return sparsity_pattern.row_length (i/chunk_size) * chunk_size;
+ else
+ // if columns don't align, then just iterate over all chunks and see
+ // what this leads to
+ {
+ SparsityPattern::const_iterator p = sparsity_pattern.begin(i/chunk_size),
+ end = sparsity_pattern.end(i/chunk_size);
+ size_type n = 0;
+ for ( ; p != end; ++p)
+ if (p->column() != sparsity_pattern.n_cols() - 1)
+ n += chunk_size;
+ else
+ n += (n_cols() % chunk_size);
+ return n;
+ }
}
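A hand-worked example of the padding branch above (not part of the patch): with chunk_size==3 and n_cols()==7, a row whose reduced row stores chunks {0,2} gets 3 columns from chunk 0 and, because chunk 2 is the last (padded) chunk, only 7 % 3 == 1 column from it, so row_length() returns 4.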
&&
(n_cols() % chunk_size == 0))
{
- // columns align with chunks, but
- // not rows
+ // columns align with chunks, but not rows
- unsigned int n = sparsity_pattern.n_nonzero_elements() *
- chunk_size *
- chunk_size;
+ size_type n = sparsity_pattern.n_nonzero_elements() *
+ chunk_size *
+ chunk_size;
n -= (sparsity_pattern.n_rows() * chunk_size - n_rows()) *
sparsity_pattern.row_length(sparsity_pattern.n_rows()-1) *
chunk_size;
else
{
- // if columns don't align, then
- // just iterate over all chunks and
- // see what this leads to. follow the advice in the documentation of
- // the sparsity pattern iterators to do the loop over individual rows,
+ // if columns don't align, then just iterate over all chunks and see
+ // what this leads to. follow the advice in the documentation of the
+ // sparsity pattern iterators to do the loop over individual rows,
// rather than all elements
- unsigned int n = 0;
+ size_type n = 0;
- for (unsigned int row = 0; row < sparsity_pattern.n_rows(); ++row)
+ for (size_type row = 0; row < sparsity_pattern.n_rows(); ++row)
{
SparsityPattern::const_iterator p = sparsity_pattern.begin(row);
for (; p!=sparsity_pattern.end(row); ++p)
AssertThrow (out, ExcIO());
- // for each entry in the underlying
- // sparsity pattern, repeat everything
+ // for each entry in the underlying sparsity pattern, repeat everything
// chunk_size x chunk_size times
- for (unsigned int i=0; i<sparsity_pattern.rows; ++i)
- for (unsigned int j=sparsity_pattern.rowstart[i];
+ for (size_type i=0; i<sparsity_pattern.rows; ++i)
+ for (size_type j=sparsity_pattern.rowstart[i];
j<sparsity_pattern.rowstart[i+1]; ++j)
if (sparsity_pattern.colnums[j] != sparsity_pattern.invalid_entry)
- for (unsigned int d=0;
+ for (size_type d=0;
((d<chunk_size) &&
(sparsity_pattern.colnums[j]*chunk_size+d < n_cols()));
++d)
- for (unsigned int e=0;
+ for (size_type e=0;
(e<chunk_size) && (i*chunk_size + e < n_rows());
++e)
- // while matrix entries are
- // usually written (i,j), with i
- // vertical and j horizontal,
- // gnuplot output is x-y, that is
- // we have to exchange the order
- // of output
+ // while matrix entries are usually written (i,j), with i vertical
+ // and j horizontal, gnuplot output is x-y, that is we have to
+ // exchange the order of output
out << sparsity_pattern.colnums[j]*chunk_size+d << " "
<< -static_cast<signed int>(i*chunk_size+e)
<< std::endl;
-unsigned int
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::bandwidth () const
{
- // calculate the bandwidth from that of the
- // underlying sparsity pattern. note that
- // even if the bandwidth of that is zero,
- // then the bandwidth of the chunky pattern
- // is chunk_size-1, if it is 1 then the
- // chunky pattern has
- // chunk_size+(chunk_size-1), etc
+ // calculate the bandwidth from that of the underlying sparsity
+ // pattern. note that even if the bandwidth of that is zero, then the
+ // bandwidth of the chunky pattern is chunk_size-1, if it is 1 then the
+ // chunky pattern has chunk_size+(chunk_size-1), etc
//
// we'll cut it off at max(n(),m())
return std::min (sparsity_pattern.bandwidth()*chunk_size
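Put as a formula (restating the comment above): a reduced-pattern bandwidth of b translates into a chunky bandwidth of b*chunk_size + (chunk_size-1), which the return statement then cuts off at max(n(), m()).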
// explicit instantiations
template
+ void ChunkSparsityPattern::copy_from<SparsityPattern> (const SparsityPattern &,
+ const size_type,
+ const bool);
+ template
void ChunkSparsityPattern::copy_from<CompressedSparsityPattern> (const CompressedSparsityPattern &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<CompressedSetSparsityPattern> (const CompressedSetSparsityPattern &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<CompressedSimpleSparsityPattern> (const CompressedSimpleSparsityPattern &,
- const unsigned int,
+ const size_type,
const bool);
template
+ void ChunkSparsityPattern::create_from<SparsityPattern>
+ (const unsigned int,
+ const unsigned int,
+ const SparsityPattern &,
+ const unsigned int,
+ const bool);
+ template
+ void ChunkSparsityPattern::create_from<CompressedSparsityPattern>
+ (const unsigned int,
+ const unsigned int,
+ const CompressedSparsityPattern &,
+ const unsigned int,
+ const bool);
+ template
+ void ChunkSparsityPattern::create_from<CompressedSetSparsityPattern>
+ (const unsigned int,
+ const unsigned int,
+ const CompressedSetSparsityPattern &,
+ const unsigned int,
+ const bool);
+ template
+ void ChunkSparsityPattern::create_from<CompressedSimpleSparsityPattern>
+ (const unsigned int,
+ const unsigned int,
+ const CompressedSimpleSparsityPattern &,
+ const unsigned int,
+ const bool);
+ template
void ChunkSparsityPattern::copy_from<float> (const FullMatrix<float> &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<double> (const FullMatrix<double> &,
struct CopyData
{
- std::vector<unsigned int> dof_indices;
+ std::vector<types::global_dof_index> dof_indices;
FullMatrix<double> cell_matrix;
dealii::Vector<double> cell_rhs;
+ const ConstraintMatrix *constraints;
};
}
matrix.set(global, global+N, rand());
}
}
- matrix.compress ();
+ matrix.compress (VectorOperation::insert);
-
+
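(The argument matters: because the loop above filled the matrix with set(), the matching flush is VectorOperation::insert; entries accumulated with add() would instead require matrix.compress(VectorOperation::add).)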
// then do a single matrix-vector
// multiplication with subsequent formation
// of the matrix norm