#include <base/exceptions.h>
#include <base/subscriptor.h>
#include <base/table.h>
+#include <base/thread_management.h>
#include <vector>
#include <map>
* a second step.
*
* @ingroup dofs
- * @author Wolfgang Bangerth, 1998, 2004
+ * @author Wolfgang Bangerth, 1998, 2004, 2008
*/
class ConstraintMatrix : public Subscriptor
{
* <tt>add_entries_local_to_global()</tt>.
*/
static const Table<2,bool> default_empty_table;
+
+ /**
+ * A mutex that makes the function
+ * <tt>distribute_local_to_global()</tt>
+ * accessible to only one thread at
+ * a time.
+ */
+ Threads::ThreadMutex mutex;
+
+ /**
+ * A temporary array for the function
+ * <tt>distribute_local_to_global()</tt>
+ * that collects column indices for
+ * values that should be written into
+ * the same row (so that we can write
+ * the values collectively into the
+ * sparse matrix, which is faster).
+ */
+ mutable std::vector<unsigned int> column_indices;
+
+ /**
+ * A temporary array for the function
+ * <tt>distribute_local_to_global()</tt>
+ * that collects values that should be
+ * written into the same row (so that
+ * we can write them collectively into
+ * the sparse matrix, which is faster).
+ */
+ mutable std::vector<double> column_values;
};
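// A minimal usage sketch of the function modified below (illustration only:
// the type and variable names are assumptions, not part of this patch, and
// the usual <lac/full_matrix.h> and <lac/sparse_matrix.h> headers are
// assumed to be included). Several assembly threads may share a single
// ConstraintMatrix object, since the mutex above serializes access to the
// scratch arrays.
void copy_cell_to_global (const FullMatrix<double>        &cell_matrix,
                          const std::vector<unsigned int> &local_dof_indices,
                          ConstraintMatrix                &constraints,
                          SparseMatrix<double>            &system_matrix)
{
  // copy the local contributions into the global matrix, resolving the
  // constraints on the fly
  constraints.distribute_local_to_global (cell_matrix,
                                          local_dof_indices,
                                          system_matrix);
}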
uncondensed.global_entry(j));
else
{
- // let c point to the constraint
- // of this column
+ // let c point to the
+ // constraint of this column
std::vector<ConstraintLine>::const_iterator c = lines.begin();
while (c->line != uncondensed_struct.get_column_numbers()[j])
++c;
Assert (sorted == true, ExcMatrixNotClosed());
const unsigned int n_local_dofs = local_dof_indices.size();
+
+ // A lock that allows only one thread
+ // at a time to proceed in this
+ // function.
+ mutex.acquire();
// have a special case where there are no
// constraints at all, since then we can be
// a lot faster
if (lines.size() == 0)
- {
- for (unsigned int i=0; i<n_local_dofs; ++i)
- for (unsigned int j=0; j<n_local_dofs; ++j)
- global_matrix.add(local_dof_indices[i],
- local_dof_indices[j],
- local_matrix(i,j));
- }
+ global_matrix.add(local_dof_indices, local_matrix);
else
{
// here we have to do something a
// little nastier than in the
- // respective function for vectors. the
- // reason is that we have two nested
- // loops and we don't want to
- // repeatedly check whether a certain
- // dof is constrained or not by
- // searching over all the constrained
- // dofs. so we have to cache this
- // knowledge, by storing for each dof
- // index whether and where the line of
- // the constraint matrix is located
+ // respective function for
+ // vectors. the reason is that we
+ // have two nested loops and we don't
+ // want to repeatedly check whether a
+ // certain dof is constrained or not
+ // by searching over all the
+ // constrained dofs. so we have to
+ // cache this knowledge, by storing
+ // for each dof index whether and
+ // where the line of the constraint
+ // matrix is located. Moreover, we
+ // store how many entries there are
+ // at most in one constrained row in
+ // order to set the scratch array for
+ // column data to a sufficient size.
std::vector<const ConstraintLine *>
constraint_lines (n_local_dofs,
static_cast<const ConstraintLine *>(0));
+ unsigned int n_max_entries_per_row = 0;
for (unsigned int i=0; i<n_local_dofs; ++i)
{
ConstraintLine index_comparison;
lines.end(),
index_comparison);
- // if this dof is constrained, then
- // set the respective entry in the
- // array. otherwise leave it at the
- // invalid position
+ // if this dof is constrained,
+ // then set the respective entry
+ // in the array. otherwise leave
+ // it at the invalid position
if ((position != lines.end()) &&
(position->line == local_dof_indices[i]))
- constraint_lines[i] = &*position;
+ {
+ constraint_lines[i] = &*position;
+ n_max_entries_per_row += position->entries.size();
+ }
}
+ // We also need to add the number of
+ // local dofs (i.e., the number of
+ // columns in one row of the local
+ // matrix) in order to obtain a
+ // sufficient size for the scratch
+ // array.
+ n_max_entries_per_row += n_local_dofs;
+ column_indices.resize(n_max_entries_per_row);
+ column_values.resize(n_max_entries_per_row);
- // now distribute entries
+ // now distribute entries row by row
for (unsigned int i=0; i<n_local_dofs; ++i)
{
const ConstraintLine *position_i = constraint_lines[i];
const bool is_constrained_i = (position_i != 0);
+
+ unsigned int col_counter = 0;
for (unsigned int j=0; j<n_local_dofs; ++j)
{
+ // we don't need to proceed when the
+ // matrix element is zero
+ if (local_matrix(i,j) == 0)
+ continue;
+
const ConstraintLine *position_j = constraint_lines[j];
const bool is_constrained_j = (position_j != 0);
(is_constrained_j == false))
{
// neither row nor column
- // is constrained
- global_matrix.add (local_dof_indices[i],
- local_dof_indices[j],
- local_matrix(i,j));
+ // is constrained, so
+ // write the value into
+ // the scratch array
+ column_indices[col_counter] = local_dof_indices[j];
+ column_values[col_counter] = local_matrix(i,j);
+ col_counter++;
}
else if ((is_constrained_i == true) &&
(is_constrained_j == false))
{
- // ok, row is constrained,
- // but column is not
+ // ok, row is
+ // constrained, but
+ // column is not. This
+ // creates entries in
+ // several rows to the
+ // same column, which is
+ // not covered by the
+ // scratch array. Write
+ // the values directly
+ // into the matrix
for (unsigned int q=0; q<position_i->entries.size(); ++q)
global_matrix.add (position_i->entries[q].first,
local_dof_indices[j],
(is_constrained_j == true))
{
// simply the other way
- // round: row ok, column is
- // constrained
+ // round: row ok, column
+ // is constrained. This
+ // time, we can put
+ // everything into the
+ // scratch array, since
+ // we are in the correct
+ // row.
for (unsigned int q=0; q<position_j->entries.size(); ++q)
- global_matrix.add (local_dof_indices[i],
- position_j->entries[q].first,
- local_matrix(i,j) *
- position_j->entries[q].second);
+ {
+ column_indices[col_counter] = position_j->entries[q].first;
+ column_values[col_counter] = local_matrix(i,j) *
+ position_j->entries[q].second;
+ col_counter++;
+ }
}
else if ((is_constrained_i == true) &&
(is_constrained_j == true))
{
- // last case: both row and
- // column are constrained
+ // last case: both row
+ // and column are
+ // constrained. Again,
+ // this creates entries
+ // in rows other than the
+ // current one, so we again
+ // write the values directly
+ // into the matrix
for (unsigned int p=0; p<position_i->entries.size(); ++p)
for (unsigned int q=0; q<position_j->entries.size(); ++q)
global_matrix.add (position_i->entries[p].first,
// do something with the
// diagonal elements. add
// the absolute value of
- // the local matrix, so the
- // resulting entry will
- // always be positive and
+ // the local matrix, so
+ // the resulting entry
+ // will always be
+ // positive and
// furthermore be in the
- // same order of magnitude
- // as the other elements of
- // the matrix
+ // same order of
+ // magnitude as the other
+ // elements of the matrix
//
- // note that this
- // also captures
- // the special case
- // that a dof is
- // both constrained
- // and fixed (this
- // can happen for
+ // note that this also
+ // captures the special
+ // case that a dof is
+ // both constrained and
+ // fixed (this can happen
+ // for hanging nodes in
+ // 3d that also happen to
+ // be on the
+ // boundary). in that
+ // case, following the
+ // above program flow, it
+ // is realized that when
+ // distributing the row
+ // and column no elements
+ // of the matrix are
+ // actually touched if
+ // all the degrees of
+ // freedom to which this
+ // dof is constrained are
+ // also constrained (the
+ // usual case with
// hanging nodes in
- // 3d that also
- // happen to be on
- // the
- // boundary). in
- // that case,
- // following the
- // above program
- // flow, it is
- // realized that
- // when
- // distributing the
- // row and column
- // no elements of
- // the matrix are
- // actually touched
- // if all the
- // degrees of
- // freedom to which
- // this dof is
- // constrained are
- // also constrained
- // (the usual case
- // with hanging
- // nodes in
- // 3d). however, in
- // the line below,
- // we do actually
- // do something
+ // 3d). however, in the
+ // line below, we do
+ // actually do something
// with this dof
if (i == j)
- global_matrix.add (local_dof_indices[i],
- local_dof_indices[i],
- local_matrix(i,i));
+ {
+ column_indices[col_counter] = local_dof_indices[j];
+ column_values[col_counter] = local_matrix(i,j);
+ col_counter++;
+ }
}
else
Assert (false, ExcInternalError());
}
+
+ // Check that we stayed within the bounds
+ // of the scratch arrays when adding
+ // elements. Note that the scratch array
+ // may even be empty if all entries in
+ // this row of the local matrix were
+ // zero, which is why the write below is
+ // guarded.
+ Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
+
+ // Finally, write the scratch array into
+ // the sparse matrix.
+ if (col_counter > 0)
+ global_matrix.add(local_dof_indices[i], col_counter,
+ &column_indices[0], &column_values[0],
+ false);
}
}
+ mutex.release();
}
<h3>lac</h3>
<ol>
+ <li>
+ <p>
+ New: All SparseMatrix classes (SparseMatrix<number>, PETSc sparse
+ matrices, Trilinos sparse matrices, block sparse matrices) can now
+ directly add or set an entire FullMatrix, as well as some other array
+ formats, into their list of values. This is faster and more convenient
+ than element-by-element addition or setting.
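+ For example, a whole cell matrix can now be added to a sparse matrix in
+ one call via <code>matrix.add (local_dof_indices, cell_matrix)</code>
+ instead of a loop over its entries (a usage sketch; the variable names
+ are purely illustrative).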
+ <br>
+ (Martin Kronbichler 2008/11/26)
+ </p>
+
<li>
<p>
New: The class LAPACKFullMatrix can now invert full matrices using
<h3>deal.II</h3>
<ol>
+ <li>
+ <p>
+ Updated: The function ConstraintMatrix::distribute_local_to_global() for
+ matrices now uses row-wise addition into sparse matrices, which
+ accelerates the transfer from local to global data.
+ <br>
+ (Martin Kronbichler, WB 2008/11/27)
+ </p>
+
<li>
<p>
Fixed: The VectorTools::interpolate_boundary_values function was implemented a bit
* <tt>false</tt>, i.e., even zero
* values are treated.
*/
+ template <typename number>
void set (const std::vector<unsigned int> &indices,
- const FullMatrix<value_type> &full_matrix,
+ const FullMatrix<number> &full_matrix,
const bool elide_zero_values = false);
/**
* different local-to-global indexing
* on rows and columns, respectively.
*/
+ template <typename number>
void set (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<value_type> &full_matrix,
+ const FullMatrix<number> &full_matrix,
const bool elide_zero_values = false);
/**
* <tt>false</tt>, i.e., even zero
* values are treated.
*/
+ template <typename number>
void set (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<value_type> &values,
+ const std::vector<number> &values,
const bool elide_zero_values = false);
/**
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
+ template <typename number>
void set (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const value_type *values,
+ const number *values,
const bool elide_zero_values = false);
/**
* i.e., zero values won't be added
* into the matrix.
*/
+ template <typename number>
void add (const std::vector<unsigned int> &indices,
- const FullMatrix<value_type> &full_matrix,
+ const FullMatrix<number> &full_matrix,
const bool elide_zero_values = true);
/**
* different local-to-global indexing
* on rows and columns, respectively.
*/
+ template <typename number>
void add (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<value_type> &full_matrix,
+ const FullMatrix<number> &full_matrix,
const bool elide_zero_values = true);
/**
* i.e., zero values won't be added
* into the matrix.
*/
+ template <typename number>
void add (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<value_type> &values,
+ const std::vector<number> &values,
const bool elide_zero_values = true);
/**
* i.e., zero values won't be added
* into the matrix.
*/
+ template <typename number>
void add (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const value_type *values,
+ const number *values,
const bool elide_zero_values = true);
/**
private:
/**
- * Temporary vector for block indices
- * when writing local to global data.
+ * Temporary vector for counting the
+ * elements written into the
+ * individual blocks when doing a
+ * collective add or set.
*/
- std::vector<unsigned int> block_col_indices;
+ std::vector<unsigned int> counter_within_block;
/**
* Temporary vector for column
- * indices when writing local to
- * global data and passing the call
- * down to a SparseMatrix.
+ * indices for each block when
+ * writing local data into the
+ * individual sparse matrices.
*/
- std::vector<unsigned int> local_col_indices;
+ std::vector<std::vector<unsigned int> > column_indices;
/**
* Temporary vector for storing the
- * local lengths of the row data.
+ * local values (they need to be
+ * reordered when writing local to
+ * global).
*/
- std::vector<unsigned int> local_row_length;
+ std::vector<std::vector<double> > column_values;
};
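// A usage sketch of the row-wise interface implemented below (illustration
// only: BlockSparseMatrix<double> from <lac/block_sparse_matrix.h>, the
// index values and the sparsity pattern are assumptions, not part of this
// patch). The global column indices may come in any order: each index is
// translated by column_block_indices.global_to_local() into a pair of block
// number and index within that block, and each block then receives one
// collective call with its portion of the data.
void add_one_row (BlockSparseMatrix<double> &matrix)
{
  const unsigned int row            = 4;
  const unsigned int col_indices[3] = {0, 7, 2};  // columns from different blocks
  const double       values[3]      = {1., 2., 3.};

  // write all three entries of row 4 in one call; the entries are assumed
  // to be present in the sparsity pattern
  matrix.add (row, 3, col_indices, values);
}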
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::set (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<value_type> &values,
+ const FullMatrix<number> &values,
const bool elide_zero_values)
{
Assert (row_indices.size() == values.m(),
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::set (const std::vector<unsigned int> &indices,
- const FullMatrix<value_type> &values,
+ const FullMatrix<number> &values,
const bool elide_zero_values)
{
Assert (indices.size() == values.m(),
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::set (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<value_type> &values,
+ const std::vector<number> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
// we need to calculate to each position
// the location in the global array.
template <class MatrixType>
+template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const value_type *values,
- const bool elide_zero_values)
+BlockMatrixBase<MatrixType>::set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number *values,
+ const bool elide_zero_values)
{
// Resize scratch arrays
- local_row_length.resize (this->n_block_cols());
- block_col_indices.resize (this->n_block_cols());
- local_col_indices.resize (n_cols);
-
- // Clear the content in local_row_length
+ column_indices.resize (this->n_block_cols());
+ column_values.resize (this->n_block_cols());
+ counter_within_block.resize (this->n_block_cols());
+
+ // Resize the sub-arrays to n_cols. This
+ // is a bit wasteful, but an actual
+ // reallocation only happens during the
+ // first few calls, since the maximum
+ // row length quickly stops growing.
for (unsigned int i=0; i<this->n_block_cols(); ++i)
- local_row_length[i] = 0;
+ {
+ column_indices[i].resize(n_cols);
+ column_values[i].resize(n_cols);
+ counter_within_block[i] = 0;
+ }
// Go through the column indices to find
// out which portions of the values
// should be written into which block
- // matrix.
- {
- unsigned int current_block = 0, row_length = 0;
- block_col_indices[0] = 0;
- for (unsigned int j=0; j<n_cols; ++j, ++row_length)
- {
- const std::pair<unsigned int, unsigned int>
- col_index = this->column_block_indices.global_to_local(col_indices[j]);
- local_col_indices[j] = col_index.second;
- if (col_index.first > current_block)
- {
- local_row_length[current_block] = row_length;
- row_length = 0;
- while (col_index.first > current_block)
- current_block++;
- block_col_indices[current_block] = j;
- }
+ // matrix. We need to reorder all the
+ // data, since we can't be sure that the
+ // data of one block is stored
+ // contiguously (in fact, it will be
+ // intermixed when the data comes from an
+ // element matrix).
+ for (unsigned int j=0; j<n_cols; ++j)
+ {
+ double value = values[j];
- Assert (col_index.first == current_block,
- ExcInternalError());
- }
- local_row_length[current_block] = row_length;
- Assert (current_block < this->n_block_cols(),
- ExcInternalError());
+ if (value == 0 && elide_zero_values == true)
+ continue;
+
+ const std::pair<unsigned int, unsigned int>
+ col_index = this->column_block_indices.global_to_local(col_indices[j]);
+
+ const unsigned int local_index = counter_within_block[col_index.first]++;
+
+ column_indices[col_index.first][local_index] = col_index.second;
+ column_values[col_index.first][local_index] = value;
+ }
#ifdef DEBUG
// If in debug mode, check that we have
// not stored more entries than there are
// columns (zero values may have been
// skipped above).
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
- length += local_row_length[i];
- Assert (length == n_cols,
- ExcDimensionMismatch(length, n_cols));
+ unsigned int length = 0;
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ length += counter_within_block[i];
+ Assert (length <= n_cols, ExcInternalError());
#endif
- }
// Now we found out about where the
// individual columns should start and
row_index = this->row_block_indices.global_to_local (row);
for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
{
- if (local_row_length[block_col] == 0)
+ if (counter_within_block[block_col] == 0)
continue;
block(row_index.first, block_col).set
(row_index.second,
- local_row_length[block_col],
- &local_col_indices[block_col_indices[block_col]],
- &values[block_col_indices[block_col]],
- elide_zero_values);
+ counter_within_block[block_col],
+ &column_indices[block_col][0],
+ &column_values[block_col][0],
+ false);
}
}
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::add (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<value_type> &values,
+ const FullMatrix<number> &values,
const bool elide_zero_values)
{
Assert (row_indices.size() == values.m(),
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::add (const std::vector<unsigned int> &indices,
- const FullMatrix<value_type> &values,
+ const FullMatrix<number> &values,
const bool elide_zero_values)
{
Assert (indices.size() == values.m(),
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::add (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<value_type> &values,
+ const std::vector<number> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
// we need to calculate to each position
// the location in the global array.
template <class MatrixType>
+template <typename number>
inline
void
BlockMatrixBase<MatrixType>::add (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const value_type *values,
+ const number *values,
const bool elide_zero_values)
{
// TODO: Look over this to find out
// whether we can do that more
// efficiently.
-
// Resize scratch arrays
- block_col_indices.resize (this->n_block_cols());
- local_row_length.resize (this->n_block_cols());
- local_col_indices.resize (n_cols);
-
- // Clear the content in local_row_length
+ column_indices.resize (this->n_block_cols());
+ column_values.resize (this->n_block_cols());
+ counter_within_block.resize (this->n_block_cols());
+
+ // Resize the sub-arrays to n_cols. This
+ // is a bit wasteful, but an actual
+ // reallocation only happens during the
+ // first few calls, since the maximum
+ // row length quickly stops growing.
for (unsigned int i=0; i<this->n_block_cols(); ++i)
- local_row_length[i] = 0;
+ {
+ column_indices[i].resize(n_cols);
+ column_values[i].resize(n_cols);
+ counter_within_block[i] = 0;
+ }
// Go through the column indices to find
// out which portions of the values
// should be written into which block
- // matrix.
- {
- unsigned int current_block = 0, row_length = 0;
- block_col_indices[0] = 0;
- for (unsigned int j=0; j<n_cols; ++j, ++row_length)
- {
- const std::pair<unsigned int, unsigned int>
- col_index = this->column_block_indices.global_to_local(col_indices[j]);
- local_col_indices[j] = col_index.second;
- if (col_index.first > current_block)
- {
- local_row_length[current_block] = row_length;
- row_length = 0;
- while (col_index.first > current_block)
- current_block++;
- block_col_indices[current_block] = j;
- }
+ // matrix. We need to reorder all the
+ // data, since we can't be sure that the
+ // data of one block is stored
+ // contiguously (in fact, it will be
+ // intermixed when the data comes from an
+ // element matrix).
+ for (unsigned int j=0; j<n_cols; ++j)
+ {
+ double value = values[j];
- Assert (col_index.first == current_block,
- ExcInternalError());
- }
- local_row_length[current_block] = row_length;
- Assert (current_block < this->n_block_cols(),
- ExcInternalError());
+ if (value == 0 && elide_zero_values == true)
+ continue;
+
+ const std::pair<unsigned int, unsigned int>
+ col_index = this->column_block_indices.global_to_local(col_indices[j]);
+
+ const unsigned int local_index = counter_within_block[col_index.first]++;
+
+ column_indices[col_index.first][local_index] = col_index.second;
+ column_values[col_index.first][local_index] = value;
+ }
#ifdef DEBUG
// If in debug mode, check that we have
// not stored more entries than there are
// columns (zero values may have been
// skipped above).
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
- length += local_row_length[i];
- Assert (length == n_cols,
- ExcDimensionMismatch(length, n_cols));
+ unsigned int length = 0;
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ length += counter_within_block[i];
+ Assert (length <= n_cols, ExcInternalError());
#endif
- }
// Now we found out about where the
// individual columns should start and
row_index = this->row_block_indices.global_to_local (row);
for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
{
- if (local_row_length[block_col] == 0)
+ if (counter_within_block[block_col] == 0)
continue;
block(row_index.first, block_col).add
(row_index.second,
- local_row_length[block_col],
- &local_col_indices[block_col_indices[block_col]],
- &values[block_col_indices[block_col]],
- elide_zero_values);
+ counter_within_block[block_col],
+ &column_indices[block_col][0],
+ &column_values[block_col][0],
+ false);
}
}
* <tt>false</tt>, i.e., even zero
* values are treated.
*/
+ template <typename number2>
void set (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
+ const FullMatrix<number2> &full_matrix,
const bool elide_zero_values = false);
/**
* different local-to-global indexing
* on rows and columns, respectively.
*/
+ template <typename number2>
void set (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
+ const FullMatrix<number2> &full_matrix,
const bool elide_zero_values = false);
/**
* <tt>false</tt>, i.e., even zero
* values are treated.
*/
+ template <typename number2>
void set (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
+ const std::vector<number2> &values,
const bool elide_zero_values = false);
/**
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
+ template <typename number2>
void set (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const number *values,
+ const number2 *values,
const bool elide_zero_values = false);
/**
* i.e., zero values won't be added
* into the matrix.
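*
* As a usage sketch (with purely
* illustrative variable names): a
* cell matrix stored in a FullMatrix
* object whose number type may
* differ from that of this matrix,
* e.g. <tt>float</tt>, can now be
* added in a single call,
* @code
*   matrix.add (local_dof_indices, cell_matrix);
* @endcode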
*/
+ template <typename number2>
void add (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
+ const FullMatrix<number2> &full_matrix,
const bool elide_zero_values = true);
/**
* different local-to-global indexing
* on rows and columns, respectively.
*/
+ template <typename number2>
void add (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
+ const FullMatrix<number2> &full_matrix,
const bool elide_zero_values = true);
/**
* i.e., zero values won't be added
* into the matrix.
*/
+ template <typename number2>
void add (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
+ const std::vector<number2> &values,
const bool elide_zero_values = true);
/**
* i.e., zero values won't be added
* into the matrix.
*/
+ template <typename number2>
void add (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const number *values,
+ const number2 *values,
const bool elide_zero_values = true);
/**
template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::set (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &values,
+ const FullMatrix<number2> &values,
const bool elide_zero_values)
{
Assert (indices.size() == values.m(),
template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::set (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &values,
+ const FullMatrix<number2> &values,
const bool elide_zero_values)
{
Assert (row_indices.size() == values.m(),
template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::set (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
+ const std::vector<number2> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
-template <typename number>
+template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::set (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const number *values,
+ const number2 *values,
const bool elide_zero_values)
{
Assert (cols != 0, ExcNotInitialized());
template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::add (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &values,
+ const FullMatrix<number2> &values,
const bool elide_zero_values)
{
Assert (indices.size() == values.m(),
template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::add (const std::vector<unsigned int> &row_indices,
const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &values,
+ const FullMatrix<number2> &values,
const bool elide_zero_values)
{
Assert (row_indices.size() == values.m(),
template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::add (const unsigned int row,
const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
+ const std::vector<number2> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
-template <typename number>
+template <typename number>
+template <typename number2>
inline
void
SparseMatrix<number>::add (const unsigned int row,
const unsigned int n_cols,
const unsigned int *col_indices,
- const number *values,
+ const number2 *values,
const bool elide_zero_values)
{
Assert (cols != 0, ExcNotInitialized());
// into it, and then build up the matrix
// from the graph. This is considerably
// faster than directly filling elements
- // into the matrix.
+ // into the matrix. Moreover, it consumes
+ // less memory, since the internal
+ // reordering does not need to be done on
+ // all the double values.
// TODO: There seems to be a problem in
// Epetra when a Graph/matrix is