const bool optimize_diagonal = true);
/**
- * Copy data from an object of
- * type
- * CompressedSparsityPattern.
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
- * compressed mode afterwards.
- */
- void copy_from (const CompressedSparsityPattern &csp,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
-
- /**
- * Copy data from an object of
- * type
- * CompressedSetSparsityPattern.
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
+ * Copy data from an object of type
+ * CompressedSparsityPattern,
+ * CompressedSetSparsityPattern or
+ * CompressedSimpleSparsityPattern.
+ * Previous content of this object is
+ * lost, and the sparsity pattern is in
* compressed mode afterwards.
*/
- void copy_from (const CompressedSetSparsityPattern &csp,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
+ template <typename SparsityType>
+ void copy_from (const SparsityType &csp,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
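+
+ // A minimal usage sketch (illustrative only; pattern type and sizes
+ // are made up). Any of the three compressed pattern types can be
+ // passed through the same template:
+ //
+ //   CompressedSparsityPattern csp (100, 100);
+ //   for (unsigned int i=0; i<100; ++i)
+ //     csp.add (i, i);
+ //
+ //   ChunkSparsityPattern chunk_pattern;
+ //   chunk_pattern.copy_from (csp, /* chunk_size = */ 4);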
/**
* Take a full matrix and use its
}
+
inline
CompressedSetSparsityPattern::row_iterator
CompressedSetSparsityPattern::row_begin (const unsigned int row) const
}
+
inline
CompressedSetSparsityPattern::row_iterator
CompressedSetSparsityPattern::row_end (const unsigned int row) const
class CompressedSimpleSparsityPattern : public Subscriptor
{
public:
+ /**
+ * An iterator that can be used to
+ * iterate over the elements of a single
+ * row. The result of dereferencing such
+ * an iterator is a column index.
+ */
+ typedef std::vector<unsigned int>::const_iterator row_iterator;
+
/**
* Initialize the matrix empty,
* that is with no memory
unsigned int column_number (const unsigned int row,
const unsigned int index) const;
+ /**
+ * Return an iterator that can loop over
+ * all entries in the given
+ * row. Dereferencing the iterator yields
+ * a column index.
+ */
+ row_iterator row_begin (const unsigned int row) const;
+
+ /**
+ * Returns the end of the given row.
+ */
+ row_iterator row_end (const unsigned int row) const;
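+
+ // Illustrative use (assuming a filled pattern `csp`, a valid row
+ // index `row`, and a made-up callee `process_column`):
+ //
+ //   for (CompressedSimpleSparsityPattern::row_iterator
+ //          it = csp.row_begin(row); it != csp.row_end(row); ++it)
+ //     process_column (*it);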
/**
* Compute the bandwidth of the matrix
* represented by this structure. The
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
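+ // if only a subset of rows is stored (rowset is non-empty), entries
+ // in rows outside that set are silently ignored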
- if (!rowset.is_element(i))
+ if (rowset.size() > 0 && !rowset.is_element(i))
return;
- lines[rowset.index_within_set(i)].add (j);
+ const unsigned int rowindex =
+ rowset.size()==0 ? i : rowset.index_within_set(i);
+ lines[rowindex].add (j);
}
{
Assert (row < rows, ExcIndexRange (row, 0, rows));
- if (!rowset.is_element(row))
+ if (rowset.size() > 0 && !rowset.is_element(row))
return;
- lines[rowset.index_within_set(row)].add_entries (begin, end, indices_are_sorted);
+ const unsigned int rowindex =
+ rowset.size()==0 ? row : rowset.index_within_set(row);
+ lines[rowindex].add_entries (begin, end, indices_are_sorted);
}
CompressedSimpleSparsityPattern::row_length (const unsigned int row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
- Assert( rowset.is_element(row), ExcInternalError());
+ if (rowset.size() > 0 && !rowset.is_element(row))
+ return 0;
- return lines[rowset.index_within_set(row)].entries.size();
+ const unsigned int rowindex =
+ rowset.size()==0 ? row : rowset.index_within_set(row);
+ return lines[rowindex].entries.size();
}
const unsigned int index) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
- Assert (index < lines[rowset.index_within_set(row)].entries.size(),
- ExcIndexRange (index, 0, lines[rowset.index_within_set(row)].entries.size()));
- Assert( rowset.is_element(row), ExcInternalError());
+ Assert( rowset.size() == 0 || rowset.is_element(row), ExcInternalError());
+
+ const unsigned int local_row = rowset.size() ? rowset.index_within_set(row) : row;
+ Assert (index < lines[local_row].entries.size(),
+ ExcIndexRange (index, 0, lines[local_row].entries.size()));
+ return lines[local_row].entries[index];
+}
+
+
+
+inline
+CompressedSimpleSparsityPattern::row_iterator
+CompressedSimpleSparsityPattern::row_begin (const unsigned int row) const
+{
+ Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
+ const unsigned int local_row = rowset.size() ? rowset.index_within_set(row) : row;
+ return lines[local_row].entries.begin();
+}
+
- return lines[rowset.index_within_set(row)].entries[index];
+
+inline
+CompressedSimpleSparsityPattern::row_iterator
+CompressedSimpleSparsityPattern::row_end (const unsigned int row) const
+{
+ Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
+ const unsigned int local_row = rowset.size() ? rowset.index_within_set(row) : row;
+ return lines[local_row].entries.end();
}
+
inline
const IndexSet &
CompressedSimpleSparsityPattern::row_index_set () const
class CompressedSparsityPattern : public Subscriptor
{
public:
+ /**
+ * An iterator that can be used to
+ * iterate over the elements of a single
+ * row. The result of dereferencing such
+ * an iterator is a column index.
+ */
+ typedef std::vector<unsigned int>::const_iterator row_iterator;
+
/**
* Initialize the matrix empty,
* that is with no memory
* the reinit() function.
*/
CompressedSparsityPattern ();
-
+
/**
* Copy constructor. This constructor is
* only allowed to be called if the
*/
CompressedSparsityPattern (const unsigned int m,
const unsigned int n);
-
+
/**
* Initialize a square matrix of
* dimension @p n.
* objects.
*/
CompressedSparsityPattern & operator = (const CompressedSparsityPattern &);
-
+
/**
* Reallocate memory and set up
* data structures for a new
*/
void reinit (const unsigned int m,
const unsigned int n);
-
+
/**
* Since this object is kept
* compressed at all times anyway,
* SparsityPattern class.
*/
void compress ();
-
+
/**
* Return whether the object is
* empty. It is empty if no
* matrix. If the entry already
* exists, nothing bad happens.
*/
- void add (const unsigned int i,
+ void add (const unsigned int i,
const unsigned int j);
/**
* happens.
*/
template <typename ForwardIterator>
- void add_entries (const unsigned int row,
+ void add_entries (const unsigned int row,
ForwardIterator begin,
ForwardIterator end,
const bool indices_are_unique_and_sorted = false);
*/
bool exists (const unsigned int i,
const unsigned int j) const;
-
+
/**
* Make the sparsity pattern
* symmetric by adding the
* square matrix.
*/
void symmetrize ();
-
+
/**
* Print the sparsity of the
* matrix. The output consists of
unsigned int column_number (const unsigned int row,
const unsigned int index) const;
+ /**
+ * Return an iterator that can loop over
+ * all entries in the given
+ * row. Dereferencing the iterator yields
+ * a column index.
+ */
+ row_iterator row_begin (const unsigned int row) const;
+
+ /**
+ * Returns the end of the given row.
+ */
+ row_iterator row_end (const unsigned int row) const;
+
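+ // Illustrative use, as for the other compressed patterns (`csp` and
+ // `row` are assumed from context):
+ //
+ //   for (CompressedSparsityPattern::row_iterator
+ //          it = csp.row_begin(row); it != csp.row_end(row); ++it)
+ //     { /* *it is a column index */ }
+ //
+ // Note that row_begin() may flush the row's cache of recently added
+ // entries (see the implementation), which can invalidate iterators
+ // obtained earlier for the same row.
+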
/**
* Compute the bandwidth of the matrix
* represented by this structure. The
*/
static
bool stores_only_added_elements ();
-
+
private:
/**
* Number of rows that this sparsity
/**
* Size of the cache.
*/
- static const unsigned int cache_size = 8;
+ static const unsigned int cache_size = 8;
public:
/**
*/
void flush_cache () const;
};
-
-
+
+
/**
* Actual data: store for each
* row the set of nonzero
// the cache first
if (cache_entries == cache_size && cache_entries != 0)
flush_cache ();
-
+
cache[cache_entries] = j;
++cache_entries;
}
+inline
+CompressedSparsityPattern::row_iterator
+CompressedSparsityPattern::row_begin (const unsigned int row) const
+{
+ Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
+
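+ // merge any cached (not yet sorted-in) entries into the `entries`
+ // array so that the returned range covers the whole row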
+ if (lines[row].cache_entries != 0)
+ lines[row].flush_cache ();
+ return lines[row].entries.begin();
+}
+
+
+
+inline
+CompressedSparsityPattern::row_iterator
+CompressedSparsityPattern::row_end (const unsigned int row) const
+{
+ Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
+ return lines[row].entries.end();
+}
+
+
+
inline
bool
CompressedSparsityPattern::stores_only_added_elements ()
Assert (cols->compressed || cols->empty(), SparsityPattern::ExcNotCompressed());
if (val)
- std::fill_n (&val[0], cols->n_nonzero_elements(), 0.);
+ memset (&val[0], 0, cols->n_nonzero_elements()*sizeof(number));
return *this;
}
if (len==0)
return first;
-
+
while (true)
{
// if length equals 8 or less,
Assert (false, ExcInternalError());
}
}
-
-
-
+
+
+
const unsigned int half = len >> 1;
const unsigned int * const middle = first + half;
-
+
// if the value is larger than
// that pointed to by the
// middle pointer, then the
*/
unsigned int
get_column_index_from_iterator (const unsigned int i);
-
+
/**
* Helper function to get the
* column index from a
template <typename value>
unsigned int
get_column_index_from_iterator (const std::pair<const unsigned int, value> &i);
-
+
}
}
-
+
namespace SparsityPatternIterators
{
// forward declaration
class Iterator;
-
+
/**
* Accessor class for iterators into
* sparsity patterns. This class is also
/** @addtogroup Exceptions
* @{ */
-
- //@}
+
+ //@}
protected:
/**
* The sparsity pattern we operate on
* nonzero entry in the matrix.
*/
void advance ();
-
+
/**
* Grant access to iterator class.
*/
* Constructor. Create an iterator
* into the sparsity pattern @p sp for the
* given row and the index within it.
- */
+ */
Iterator (const SparsityPattern *sp,
const unsigned int row,
const unsigned int index);
-
+
/**
* Prefix increment.
*/
* accessor class.
*/
Accessor accessor;
- };
+ };
}
typedef
SparsityPatternIterators::Iterator
const_iterator;
-
+
+ /**
+ * Typedef an iterator class that allows
+ * to walk over the nonzero elements of a
+ * row of a sparsity pattern.
+ */
+ typedef
+ const unsigned int * row_iterator;
+
/**
* Typedef an iterator class that allows
* to walk over all nonzero elements of a
typedef
SparsityPatternIterators::Iterator
iterator;
-
-
+
+
/**
* Define a value which is used
* to indicate that a certain
* variable may change over time.
*/
static const unsigned int invalid_entry = numbers::invalid_unsigned_int;
-
+
/**
* Initialize the matrix empty,
* that is with no memory
* the reinit() function.
*/
SparsityPattern ();
-
+
/**
* Copy constructor. This
* constructor is only allowed to
const unsigned int n,
const std::vector<unsigned int>& row_lengths,
const bool optimize_diagonal = true);
-
+
/**
* Initialize a quadratic matrix
* of dimension <tt>n</tt> with
SparsityPattern (const SparsityPattern &original,
const unsigned int max_per_row,
const unsigned int extra_off_diagonals);
-
+
/**
* Destructor.
*/
* objects.
*/
SparsityPattern & operator = (const SparsityPattern &);
-
+
/**
* Reallocate memory and set up data
* structures for a new matrix with
const unsigned int n,
const VectorSlice<const std::vector<unsigned int> > &row_lengths,
const bool optimize_diagonal = true);
-
+
/**
* This function compresses the sparsity
* structure that this object represents.
* iterator for the last row of a matrix.
*/
inline iterator end (const unsigned int r) const;
-
+
+ /**
+ * STL-like iterator pointing to the first
+ * entry of row <tt>r</tt>.
+ *
+ * Note that if the given row is empty,
+ * i.e. does not contain any nonzero
+ * entries, then the iterator returned by
+ * this function equals
+ * <tt>end(r)</tt>. Note also that the
+ * iterator may not be dereferenceable in
+ * that case.
+ */
+ inline row_iterator row_begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>. It
+ * points to the first element past the
+ * end of line @p r, or past the end of
+ * the entire sparsity pattern.
+ *
+ * Note that the end iterator is not
+ * necessarily dereferenceable. This is in
+ * particular the case if it is the end
+ * iterator for the last row of a matrix.
+ */
+ inline row_iterator row_end (const unsigned int r) const;
+
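+ // Illustrative sketch (assumes a compressed pattern `sp`): since
+ // row_iterator is a plain pointer into the column index array, a
+ // row can be scanned cheaply:
+ //
+ //   for (SparsityPattern::row_iterator
+ //          p = sp.row_begin(r); p != sp.row_end(r); ++p)
+ //     if (*p == target_column)     // `target_column` is made up
+ //       { /* entry (r, *p) exists */ }
+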
/**
* This function can be used as a
* replacement for reinit(),
const bool optimize_diagonal = true);
/**
- * Copy data from an object of
- * type
- * CompressedSparsityPattern.
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
- * compressed mode afterwards.
- */
- void copy_from (const CompressedSparsityPattern &csp,
- const bool optimize_diagonal = true);
-
-
- /**
- * Copy data from an object of
- * type
- * CompressedSetSparsityPattern.
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
- * compressed mode afterwards.
- */
- void copy_from (const CompressedSetSparsityPattern &csp,
- const bool optimize_diagonal = true);
-
- /**
- * Copy data from an object of
- * type
+ * Copy data from an object of type
+ * CompressedSparsityPattern,
+ * CompressedSetSparsityPattern or
* CompressedSimpleSparsityPattern.
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
+ * Previous content of this object is
+ * lost, and the sparsity pattern is in
* compressed mode afterwards.
*/
- void copy_from (const CompressedSimpleSparsityPattern &csp,
+ template <typename CompressedSparsityType>
+ void copy_from (const CompressedSparsityType &csp,
const bool optimize_diagonal = true);
-
/**
* Take a full matrix and use its
* nonzero entries to generate a
template <typename number>
void copy_from (const FullMatrix<number> &matrix,
const bool optimize_diagonal = true);
-
+
/**
* Return whether the object is empty. It
* is empty if no memory is allocated,
* @deprecated Use
* SparseMatrix::const_iterator
*/
- unsigned int operator() (const unsigned int i,
+ unsigned int operator() (const unsigned int i,
const unsigned int j) const;
/**
*/
std::pair<unsigned int, unsigned int>
matrix_position (const unsigned int global_index) const;
-
+
/**
* Add a nonzero entry to the matrix.
* This function may only be called
* If the entry already exists, nothing
* bad happens.
*/
- void add (const unsigned int i,
+ void add (const unsigned int i,
const unsigned int j);
/**
* exist, nothing bad happens.
*/
template <typename ForwardIterator>
- void add_entries (const unsigned int row,
+ void add_entries (const unsigned int row,
ForwardIterator begin,
ForwardIterator end,
const bool indices_are_sorted = false);
-
+
/**
* Make the sparsity pattern
* symmetric by adding the
* quadratic matrix.
*/
void symmetrize ();
-
+
/**
* Return number of rows of this
* matrix, which equals the dimension
unsigned int n_nonzero_elements () const;
/**
- * Return whether the structure is
+ * Return whether the structure is
* compressed or not.
*/
bool is_compressed () const;
* template arguments.
*/
bool stores_only_added_elements () const;
-
+
/**
* @deprecated
*
*/
void partition (const unsigned int n_partitions,
std::vector<unsigned int> &partition_indices) const;
-
+
/**
* Write the data of this object
* en bloc to a file. This is
* more.
*/
void block_read (std::istream &in);
-
+
/**
* Print the sparsity of the
* matrix. The output consists of
unsigned int memory_consumption () const;
/**
- * This is kind of an expert mode. Get
+ * This is kind of an expert mode. Get
* access to the rowstart array, but
* read-only.
*
* Is special treatment of
* diagonals enabled?
*/
- bool diagonal_optimized;
-
+ bool diagonal_optimized;
+
/**
* Make all sparse matrices
* friends of this class.
namespace SparsityPatternIterators
-{
+{
inline
Accessor::
Accessor (const SparsityPattern *sparsity_pattern,
Accessor::column() const
{
Assert (is_valid_entry() == true, ExcInvalidIterator());
-
+
return (sparsity_pattern
->get_column_numbers()[sparsity_pattern
->get_rowstart_indices()[a_row]+a_index]);
inline
bool
Accessor::operator == (const Accessor& other) const
- {
+ {
return (sparsity_pattern == other.sparsity_pattern &&
a_row == other.a_row &&
a_index == other.a_index);
inline
bool
Accessor::operator < (const Accessor& other) const
- {
+ {
Assert (sparsity_pattern == other.sparsity_pattern,
ExcInternalError());
-
+
return (a_row < other.a_row ||
(a_row == other.a_row &&
a_index < other.a_index));
}
-
+
inline
void
Accessor::advance ()
- {
+ {
Assert (a_row < sparsity_pattern->n_rows(), ExcIteratorPastEnd());
-
+
++a_index;
// if at end of line: cycle until we
break;
}
}
-
+
inline
{
return accessor < other.accessor;
}
-
+
}
+inline
+SparsityPattern::row_iterator
+SparsityPattern::row_begin (const unsigned int r) const
+{
+ Assert (r<n_rows(), ExcIndexRange(r,0,n_rows()));
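+ // row r occupies the half-open range [rowstart[r], rowstart[r+1])
+ // within the colnums array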
+ return &colnums[rowstart[r]];
+}
+
+
+
+inline
+SparsityPattern::row_iterator
+SparsityPattern::row_end (const unsigned int r) const
+{
+ Assert (r<n_rows(), ExcIndexRange(r,0,n_rows()));
+ return &colnums[rowstart[r+1]];
+}
+
+
+
inline
unsigned int
SparsityPattern::n_rows () const
unsigned int
SparsityPattern::n_nonzero_elements () const
{
- Assert ((rowstart!=0) && (colnums!=0), ExcEmptyObject());
+ Assert ((rowstart!=0) && (colnums!=0), ExcEmptyObject());
Assert (compressed, ExcNotCompressed());
return rowstart[rows]-rowstart[0];
}
{
Assert (static_cast<unsigned int>(std::distance (begin, end)) == n_rows,
ExcIteratorRange (std::distance (begin, end), n_rows));
-
+
// first determine row lengths for
// each row. if the matrix is
// quadratic, then we might have to
const unsigned int col
= internal::SparsityPatternTools::get_column_index_from_iterator(*j);
Assert (col < n_cols, ExcIndexRange(col,0,n_cols));
-
+
if ((col!=row) || !is_square)
*cols++ = col;
};
#include <lac/chunk_sparsity_pattern.h>
#include <lac/compressed_sparsity_pattern.h>
#include <lac/compressed_set_sparsity_pattern.h>
+#include <lac/compressed_simple_sparsity_pattern.h>
#include <lac/full_matrix.h>
{
Assert (s.rows == 0, ExcInvalidConstructorCall());
Assert (s.cols == 0, ExcInvalidConstructorCall());
-
+
reinit (0,0,0,0, false);
}
const unsigned int n,
const std::vector<unsigned int>& row_lengths,
const unsigned int chunk_size,
- const bool optimize_diag)
+ const bool optimize_diag)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
const unsigned int m,
const std::vector<unsigned int>& row_lengths,
const unsigned int chunk_size,
- const bool optimize_diag)
+ const bool optimize_diag)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
// perform the checks in the underlying
// object as well
sparsity_pattern = s.sparsity_pattern;
-
+
return *this;
}
const bool optimize_diag)
{
Assert (row_lengths.size() == m, ExcInvalidNumber (m));
- Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
-
+ Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
+
rows = m;
cols = n;
// arithmetic equals
// ((m+chunk_size-1)/chunk_size):
const unsigned int m_chunks = (m+chunk_size-1) / chunk_size,
- n_chunks = (n+chunk_size-1) / chunk_size;
+ n_chunks = (n+chunk_size-1) / chunk_size;
// compute the maximum number of chunks in
// each row. the passed array denotes the
+template <typename SparsityType>
void
-ChunkSparsityPattern::copy_from (const CompressedSparsityPattern &csp,
- const unsigned int chunk_size,
- const bool optimize_diag)
+ChunkSparsityPattern::copy_from (const SparsityType &csp,
+ const unsigned int chunk_size,
+ const bool optimize_diag)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
// pattern
std::vector<unsigned int> entries_per_row (csp.n_rows(), 0);
for (unsigned int row = 0; row<csp.n_rows(); ++row)
- for (unsigned int j=0; j<csp.row_length(row); ++j)
- ++entries_per_row[row];
+ entries_per_row[row] = csp.row_length(row);
reinit (csp.n_rows(), csp.n_cols(),
entries_per_row,
chunk_size, optimize_diag);
-
- // then actually fill it
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
- for (unsigned int j=0; j<csp.row_length(row); ++j)
- add (row, csp.column_number(row,j));
-
- // finally compress
- compress ();
-}
-
-
-
-void
-ChunkSparsityPattern::copy_from (const CompressedSetSparsityPattern &csp,
- const unsigned int chunk_size,
- const bool optimize_diag)
-{
- Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
-
- // count number of entries per row, then
- // initialize the underlying sparsity
- // pattern
- std::vector<unsigned int> entries_per_row (csp.n_rows(), 0);
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
- {
- CompressedSetSparsityPattern::row_iterator col_num = csp.row_begin (row);
- for (; col_num != csp.row_end (row); ++col_num)
- ++entries_per_row[row];
- }
-
- reinit (csp.n_rows(), csp.n_cols(),
- entries_per_row,
- chunk_size, optimize_diag);
-
- // then actually fill it
+ // then actually fill it
for (unsigned int row = 0; row<csp.n_rows(); ++row)
{
- CompressedSetSparsityPattern::row_iterator col_num = csp.row_begin (row);
+ typename SparsityType::row_iterator col_num = csp.row_begin (row);
for (; col_num != csp.row_end (row); ++col_num)
add (row, *col_num);
}
-
+
// finally compress
compress ();
}
reinit (matrix.m(), matrix.n(),
entries_per_row,
chunk_size, optimize_diag);
-
- // then actually fill it
+
+ // then actually fill it
for (unsigned int row=0; row<matrix.m(); ++row)
for (unsigned int col=0; col<matrix.n(); ++col)
if (matrix(row,col) != 0)
unsigned int
-ChunkSparsityPattern::max_entries_per_row () const
+ChunkSparsityPattern::max_entries_per_row () const
{
return sparsity_pattern.max_entries_per_row() * chunk_size;
}
void
-ChunkSparsityPattern::symmetrize ()
+ChunkSparsityPattern::symmetrize ()
{
// matrix must be square. note that for
// some matrix sizes, the current sparsity
chunk_size;
return n;
}
-
- else
+
+ else
{
// if columns don't align, then
// just iterate over all chunks and
ChunkSparsityPattern::print (std::ostream &out) const
{
Assert ((sparsity_pattern.rowstart!=0) && (sparsity_pattern.colnums!=0),
- ExcEmptyObject());
+ ExcEmptyObject());
AssertThrow (out, ExcIO());
out << ',' << sparsity_pattern.colnums[j]*chunk_size+e;
out << ']' << std::endl;
}
-
+
AssertThrow (out, ExcIO());
}
(sparsity_pattern.colnums!=0), ExcEmptyObject());
AssertThrow (out, ExcIO());
-
+
// for each entry in the underlying
// sparsity pattern, repeat everything
// chunk_size x chunk_size times
void
-ChunkSparsityPattern::block_write (std::ostream &out) const
+ChunkSparsityPattern::block_write (std::ostream &out) const
{
AssertThrow (out, ExcIO());
// then the underlying sparsity pattern
sparsity_pattern.block_write (out);
out << ']';
-
- AssertThrow (out, ExcIO());
+
+ AssertThrow (out, ExcIO());
}
sparsity_pattern.block_read (in);
in >> c;
- AssertThrow (c == ']', ExcIO());
+ AssertThrow (c == ']', ExcIO());
}
// explicit instantiations
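+// (copy_from is now a template defined in this translation unit, so
+// each compressed sparsity pattern type used by client code must be
+// instantiated explicitly)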
template
+void ChunkSparsityPattern::copy_from<CompressedSparsityPattern> (const CompressedSparsityPattern &,
+ const unsigned int,
+ const bool);
+template
+void ChunkSparsityPattern::copy_from<CompressedSetSparsityPattern> (const CompressedSetSparsityPattern &,
+ const unsigned int,
+ const bool);
+template
+void ChunkSparsityPattern::copy_from<CompressedSimpleSparsityPattern> (const CompressedSimpleSparsityPattern &,
+ const unsigned int,
+ const bool);
+template
void ChunkSparsityPattern::copy_from<float> (const FullMatrix<float> &,
const unsigned int,
- bool);
+ const bool);
template
void ChunkSparsityPattern::copy_from<double> (const FullMatrix<double> &,
const unsigned int,
- bool);
+ const bool);
DEAL_II_NAMESPACE_CLOSE
// actually doing something.
ForwardIterator my_it = begin;
unsigned int col = *my_it;
- std::vector<unsigned int>::iterator it =
+ std::vector<unsigned int>::iterator it =
std::lower_bound(entries.begin(), entries.end(), col);
- while (*it == col)
+ while (*it == col)
{
++my_it;
if (my_it == end)
entries.push_back(col);
it = entries.end()-1;
}
- else {
+ else {
// do a binary search to find the place
// where to insert:
- it2 = std::lower_bound(entries.begin(), entries.end(), col);
+ it2 = std::lower_bound(entries.begin(), entries.end(), col);
// If this entry is a duplicate, continue
// immediately Insert at the right place
return entries.capacity()*sizeof(unsigned int)+sizeof(Line);
}
-
+
CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern ()
:
rows(0),
CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const unsigned int m,
const unsigned int n,
const IndexSet & rowset_
-)
+)
:
rows(0),
cols(0),
rows = m;
cols = n;
rowset=rowset_;
- if (rowset.size()==0)
- {
- rowset.set_size(m);
- rowset.add_range(0,m);
- }
-
- Assert( rowset.size()==m, ExcInternalError());
- std::vector<Line> new_lines (rowset.n_elements());
+ std::vector<Line> new_lines (rowset.size()==0 ? rows : rowset.n_elements());
lines.swap (new_lines);
}
CompressedSimpleSparsityPattern::max_entries_per_row () const
{
unsigned int m = 0;
- for (unsigned int i=0; i<rowset.n_elements(); ++i)
+ for (unsigned int i=0; i<lines.size(); ++i)
{
m = std::max (m, static_cast<unsigned int>(lines[i].entries.size()));
}
-
+
return m;
}
-bool
+bool
CompressedSimpleSparsityPattern::exists (const unsigned int i,
const unsigned int j) const
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
- Assert( rowset.is_element(i), ExcInternalError());
+ Assert( rowset.size()==0 || rowset.is_element(i), ExcInternalError());
+
+ const unsigned int rowindex =
+ rowset.size()==0 ? i : rowset.index_within_set(i);
- unsigned int rowindex = rowset.index_within_set(i);
-
return std::binary_search (lines[rowindex].entries.begin(),
lines[rowindex].entries.end(),
j);
// 2. that the @p{add} function can
// be called on elements that
// already exist without any harm
- for (unsigned int row=0; row<rows; ++row)
+ for (unsigned int row=0; row<lines.size(); ++row)
{
- unsigned int rowindex = rowset.nth_index_in_set(row);
+ const unsigned int rowindex =
+ rowset.size()==0 ? row : rowset.nth_index_in_set(row);
for (std::vector<unsigned int>::const_iterator
j=lines[row].entries.begin();
void
CompressedSimpleSparsityPattern::print (std::ostream &out) const
{
- for (unsigned int row=0; row<rows; ++row)
+ for (unsigned int row=0; row<lines.size(); ++row)
{
- out << '[' << rowset.nth_index_in_set(row);
+ out << '[' << (rowset.size()==0 ? row : rowset.nth_index_in_set(row));
for (std::vector<unsigned int>::const_iterator
j=lines[row].entries.begin();
void
CompressedSimpleSparsityPattern::print_gnuplot (std::ostream &out) const
-{
+{
for (unsigned int row=0; row<rows; ++row)
{
- unsigned int rowindex = rowset.nth_index_in_set(row);
+ const unsigned int rowindex =
+ rowset.size()==0 ? row : rowset.nth_index_in_set(row);
for (std::vector<unsigned int>::const_iterator
j=lines[row].entries.begin();
<< -static_cast<signed int>(rowindex)
<< std::endl;
}
-
+
AssertThrow (out, ExcIO());
}
unsigned int b=0;
for (unsigned int row=0; row<rows; ++row)
{
- unsigned int rowindex = rowset.nth_index_in_set(row);
+ const unsigned int rowindex =
+ rowset.size()==0 ? row : rowset.nth_index_in_set(row);
for (std::vector<unsigned int>::const_iterator
j=lines[row].entries.begin();
if (static_cast<unsigned int>(std::abs(static_cast<int>(rowindex-*j))) > b)
b = std::abs(static_cast<signed int>(rowindex-*j));
}
-
+
return b;
}
{
n += lines[i].entries.size();
}
-
+
return n;
}
//TODO: IndexSet...
unsigned int mem = sizeof(CompressedSimpleSparsityPattern);
for (unsigned int i=0; i<lines.size(); ++i)
- mem += MemoryConsumption::memory_consumption (lines[i]);
+ mem += MemoryConsumption::memory_consumption (lines[i]);
return mem;
}
+template <typename CSP>
void
-SparsityPattern::copy_from (const CompressedSparsityPattern &csp,
+SparsityPattern::copy_from (const CSP &csp,
const bool optimize_diag)
{
// first determine row lengths for
// bother to check whether that
// diagonal entry is in a certain
// row or not
- const bool is_square = optimize_diag && (csp.n_rows() == csp.n_cols());
+ const bool do_diag_optimize = optimize_diag && (csp.n_rows() == csp.n_cols());
std::vector<unsigned int> row_lengths (csp.n_rows());
for (unsigned int i=0; i<csp.n_rows(); ++i)
{
unsigned int additional_diagonal = 0;
- if (is_square)
+ if (do_diag_optimize)
if (csp.exists(i,i) == false)
additional_diagonal = 1;
row_lengths[i] = csp.row_length(i) + additional_diagonal;
}
-
- reinit (csp.n_rows(), csp.n_cols(), row_lengths, is_square);
-
- // now enter all the elements into
- // the matrix. note that if the
- // matrix is quadratic, then we
- // already have the diagonal
- // element preallocated
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
- {
- unsigned int *cols = &colnums[rowstart[row]] + (is_square ? 1 : 0);
- const unsigned int row_length = csp.row_length(row);
- for (unsigned int j=0; j<row_length; ++j)
- {
- const unsigned int col = csp.column_number(row,j);
- Assert (col < csp.n_cols(), ExcIndexRange(col,0,csp.n_cols()));
-
- if ((col!=row) || !is_square)
- *cols++ = col;
- }
- }
-
- // do not need to compress the sparsity
- // pattern since we already have
- // allocated the right amount of data,
- // and the CSP data is sorted, too.
- compressed = true;
-}
-
-
-
-void
-SparsityPattern::copy_from (const CompressedSetSparsityPattern &csp,
- const bool optimize_diag)
-{
- // first determine row lengths for
- // each row. if the matrix is
- // quadratic, then we might have to
- // add an additional entry for the
- // diagonal, if that is not yet
- // present. as we have to call
- // compress anyway later on, don't
- // bother to check whether that
- // diagonal entry is in a certain
- // row or not
- const bool is_square = optimize_diag && (csp.n_rows() == csp.n_cols());
- std::vector<unsigned int> row_lengths (csp.n_rows());
- for (unsigned int i=0; i<csp.n_rows(); ++i)
- {
- unsigned int additional_diagonal = 0;
- if (is_square)
- if (csp.exists(i,i) == false)
- additional_diagonal = 1;
-
- row_lengths[i] = csp.row_length(i) + additional_diagonal;
- }
- reinit (csp.n_rows(), csp.n_cols(), row_lengths, is_square);
+ reinit (csp.n_rows(), csp.n_cols(), row_lengths, do_diag_optimize);
// now enter all the elements into
// the matrix. note that if the
// element preallocated
for (unsigned int row = 0; row<csp.n_rows(); ++row)
{
- unsigned int *cols = &colnums[rowstart[row]] + (is_square ? 1 : 0);
- CompressedSetSparsityPattern::row_iterator col_num = csp.row_begin (row);
+ unsigned int *cols = &colnums[rowstart[row]] + (do_diag_optimize ? 1 : 0);
+ typename CSP::row_iterator col_num = csp.row_begin (row),
+ end_row = csp.row_end (row);
- for (; col_num != csp.row_end (row); ++col_num)
+ for (; col_num != end_row; ++col_num)
{
const unsigned int col = *col_num;
- if ((col!=row) || !is_square)
+ if ((col!=row) || !do_diag_optimize)
*cols++ = col;
}
}
compressed = true;
}
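+
+
+// An illustrative call site (names such as `dof_handler` are made up):
+// the single template replaces the three former overloads, so client
+// code keeps compiling unchanged, e.g.
+//
+//   CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
+//                                        dof_handler.n_dofs());
+//   DoFTools::make_sparsity_pattern (dof_handler, csp);
+//
+//   SparsityPattern sp;
+//   sp.copy_from (csp);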
-void
-SparsityPattern::copy_from (const CompressedSimpleSparsityPattern &csp,
- const bool optimize_diag)
-{
- // first determine row lengths for
- // each row. if the matrix is
- // quadratic, then we might have to
- // add an additional entry for the
- // diagonal, if that is not yet
- // present. as we have to call
- // compress anyway later on, don't
- // bother to check whether that
- // diagonal entry is in a certain
- // row or not
- const bool is_square = optimize_diag && (csp.n_rows() == csp.n_cols());
- std::vector<unsigned int> row_lengths (csp.n_rows());
- for (unsigned int i=0; i<csp.n_rows(); ++i)
- {
- unsigned int additional_diagonal = 0;
- if (is_square)
- if (csp.exists(i,i) == false)
- additional_diagonal = 1;
-
- row_lengths[i] = csp.row_length(i) + additional_diagonal;
- }
- reinit (csp.n_rows(), csp.n_cols(), row_lengths, is_square);
-
- // now enter all the elements into
- // the matrix. note that if the
- // matrix is quadratic, then we
- // already have the diagonal
- // element preallocated
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
- {
- unsigned int *cols = &colnums[rowstart[row]] + (is_square ? 1 : 0);
- const unsigned int row_length = csp.row_length(row);
- for (unsigned int j=0; j<row_length; ++j)
- {
- const unsigned int col = csp.column_number(row,j);
- Assert (col < csp.n_cols(), ExcIndexRange(col,0,csp.n_cols()));
-
- if ((col!=row) || !is_square)
- *cols++ = col;
- }
- }
-
- // do not need to compress the sparsity
- // pattern since we already have
- // allocated the right amount of data,
- // and the CSP data is sorted, too.
- compressed = true;
-}
-
template <typename number>
// explicit instantiations
+template void SparsityPattern::copy_from<CompressedSparsityPattern> (const CompressedSparsityPattern &, bool);
+template void SparsityPattern::copy_from<CompressedSetSparsityPattern> (const CompressedSetSparsityPattern &, bool);
+template void SparsityPattern::copy_from<CompressedSimpleSparsityPattern> (const CompressedSimpleSparsityPattern &, bool);
template void SparsityPattern::copy_from<float> (const FullMatrix<float> &, bool);
template void SparsityPattern::copy_from<double> (const FullMatrix<double> &, bool);
static_cast<unsigned int>(input_col_map.NumGlobalElements()),
ExcDimensionMismatch (sp.n_cols(),
input_col_map.NumGlobalElements()));
- Assert (exchange_data == false, ExcNotImplemented());
column_space_map = std::auto_ptr<Epetra_Map> (new Epetra_Map (input_col_map));
graph.reset();
ExcDimensionMismatch (graph->NumGlobalRows(),
sp.n_rows()));
- std::vector<int> row_indices;
const unsigned int n_rows = sp.n_rows();
-
- for (unsigned int row=0; row<n_rows; ++row)
- if ( input_row_map.MyGID(row) )
- {
- const int row_length = sp.row_length(row);
- row_indices.resize (row_length, -1);
-
- for (int col=0; col < row_length; ++col)
- row_indices[col] = sp.column_number (row, col);
-
- graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
- &row_indices[0]);
- }
-
- compress();
- }
-
-
-
- template <>
- void
- SparsityPattern::reinit (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
- const CompressedSimpleSparsityPattern &sp,
- const bool exchange_data)
- {
- Assert (sp.n_rows() ==
- static_cast<unsigned int>(input_row_map.NumGlobalElements()),
- ExcDimensionMismatch (sp.n_rows(),
- input_row_map.NumGlobalElements()));
- Assert (sp.n_cols() ==
- static_cast<unsigned int>(input_col_map.NumGlobalElements()),
- ExcDimensionMismatch (sp.n_cols(),
- input_col_map.NumGlobalElements()));
-
- column_space_map = std::auto_ptr<Epetra_Map> (new Epetra_Map (input_col_map));
- graph.reset();
- compressed = false;
-
- Assert (input_row_map.LinearMap() == true,
- ExcMessage ("This function is not efficient if the map is not contiguous."));
-
- std::vector<int> n_entries_per_row(input_row_map.MaxMyGID()-
- input_row_map.MinMyGID() + 1);
-
- for (unsigned int row=input_row_map.MinMyGID();
- row<static_cast<unsigned int>(input_row_map.MaxMyGID()+1);
- ++row)
- n_entries_per_row[row-input_row_map.MinMyGID()] = sp.row_length(row);
-
- if (input_row_map.Comm().NumProc() > 1)
- graph = std::auto_ptr<Epetra_FECrsGraph>
- (new Epetra_FECrsGraph(Copy, input_row_map,
- n_entries_per_row[0],
- false));
- else
- graph = std::auto_ptr<Epetra_FECrsGraph>
- (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
- n_entries_per_row[0],
- false));
-
- Assert (graph->NumGlobalRows() == (int)sp.n_rows(),
- ExcDimensionMismatch (graph->NumGlobalRows(),
- sp.n_rows()));
-
- const unsigned int n_rows = sp.n_rows();
- std::vector<int> row_indices;
+ std::vector<int> row_indices;
// Include possibility to exchange data
// since CompressedSimpleSparsityPattern is
if (input_row_map.MyGID(row) )
{
const int row_length = sp.row_length(row);
+ if (row_length == 0)
+ continue;
+
row_indices.resize (row_length, -1);
- for (int col=0; col < row_length; ++col)
- row_indices[col] = sp.column_number (row, col);
+ typename SparsityType::row_iterator col_num = sp.row_begin (row),
+ row_end = sp.row_end(row);
+ for (unsigned int col = 0; col_num != row_end; ++col_num, ++col)
+ row_indices[col] = *col_num;
graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
&row_indices[0]);
}
- else if ( exchange_data && sp.row_index_set().is_element(row) )
- {
- const int row_length = sp.row_length(row);
- row_indices.resize (row_length, -1);
-
- for (int col=0; col < row_length; ++col)
- row_indices[col] = sp.column_number (row, col);
-
- graph->InsertGlobalIndices (1, (int*)&row, row_length, &row_indices[0]);
- }
-
- compress();
- }
-
-
-
- template<>
- void
- SparsityPattern::reinit (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
- const CompressedSetSparsityPattern &sp,
- const bool exchange_data)
- {
- Assert (exchange_data == false, ExcNotImplemented());
- Assert (sp.n_rows() ==
- static_cast<unsigned int>(input_row_map.NumGlobalElements()),
- ExcDimensionMismatch (sp.n_rows(),
- input_row_map.NumGlobalElements()));
- Assert (sp.n_cols() ==
- static_cast<unsigned int>(input_col_map.NumGlobalElements()),
- ExcDimensionMismatch (sp.n_cols(),
- input_col_map.NumGlobalElements()));
-
- column_space_map = std::auto_ptr<Epetra_Map> (new Epetra_Map (input_col_map));
- graph.reset();
- compressed = false;
-
- Assert (input_row_map.LinearMap() == true,
- ExcMessage ("This function is not efficient if the map is not contiguous."));
-
- std::vector<int> n_entries_per_row(input_row_map.MaxMyGID()-
- input_row_map.MinMyGID() + 1);
- for (unsigned int row=input_row_map.MinMyGID();
- row<static_cast<unsigned int>(input_row_map.MaxMyGID()+1);
- ++row)
- {
- n_entries_per_row[row-input_row_map.MinMyGID()] = sp.row_length(row);
- }
-
-
- if (input_row_map.Comm().NumProc() > 1)
- graph = std::auto_ptr<Epetra_FECrsGraph>
- (new Epetra_FECrsGraph(Copy, input_row_map,
- n_entries_per_row[0],
- false));
- else
- graph = std::auto_ptr<Epetra_FECrsGraph>
- (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
- n_entries_per_row[0],
- false));
-
- Assert (graph->NumGlobalRows() == (int)sp.n_rows(),
- ExcDimensionMismatch (graph->NumGlobalRows(),
- sp.n_rows()));
-
-
- const unsigned int n_rows = sp.n_rows();
- std::vector<int> row_indices;
-
- for (unsigned int row=0; row<n_rows; ++row)
- if (exchange_data || input_row_map.MyGID(row))
+ else if ( exchange_data )
{
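+ // this processor does not own the row: stage its entries anyway;
+ // Epetra_FECrsGraph ships them to the owning processor when the
+ // graph is assembled in compress()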
const int row_length = sp.row_length(row);
if (row_length == 0)
  continue;
row_indices.resize (row_length, -1);
- CompressedSetSparsityPattern::row_iterator col_num =
- sp.row_begin (row);
-
- for (unsigned int col = 0;
- col_num != sp.row_end (row);
- ++col_num, ++col)
+ typename SparsityType::row_iterator col_num = sp.row_begin (row),
+ row_end = sp.row_end(row);
+ for (unsigned int col = 0; col_num != row_end; ++col_num, ++col)
row_indices[col] = *col_num;
- graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
- &row_indices[0]);
+ graph->InsertGlobalIndices (1, (int*)&row, row_length, &row_indices[0]);
}
compress();
const Epetra_Map &,
const dealii::CompressedSparsityPattern &,
bool);
+ template void
+ SparsityPattern::reinit (const Epetra_Map &,
+ const Epetra_Map &,
+ const dealii::CompressedSetSparsityPattern &,
+ bool);
+ template void
+ SparsityPattern::reinit (const Epetra_Map &,
+ const Epetra_Map &,
+ const dealii::CompressedSimpleSparsityPattern &,
+ bool);
}