source/lac/sparse_vanka.cc
source/lac/sparsity_pattern.cc
source/lac/sparsity_tools.cc
-source/lac/swappable_vector.cc
-source/lac/timestep_control.cc
-source/lac/tridiagonal_matrix.cc
-source/lac/trilinos_block_sparse_matrix.cc
-source/lac/trilinos_block_vector.cc
-source/lac/trilinos_precondition.cc
-source/lac/trilinos_solver.cc
-source/lac/trilinos_sparse_matrix.cc
-source/lac/trilinos_sparsity_pattern.cc
-source/lac/trilinos_vector_base.cc
-source/lac/trilinos_vector.cc
-source/lac/vector.cc
-source/lac/vector_memory.cc
-source/lac/vector_view.cc
+BRUNO source/lac/swappable_vector.cc
+BRUNO source/lac/timestep_control.cc
+BRUNO source/lac/tridiagonal_matrix.cc
+BRUNO source/lac/trilinos_block_sparse_matrix.cc
+BRUNO source/lac/trilinos_block_vector.cc
+BRUNO source/lac/trilinos_precondition.cc
+BRUNO source/lac/trilinos_solver.cc
+BRUNO source/lac/trilinos_sparse_matrix.cc
+BRUNO source/lac/trilinos_sparsity_pattern.cc
+BRUNO source/lac/trilinos_vector_base.cc
+BRUNO source/lac/trilinos_vector.cc
+BRUNO source/lac/vector.cc
+BRUNO source/lac/vector_memory.cc
+BRUNO source/lac/vector_view.cc
source/multigrid/mg_base.cc
source/multigrid/mg_dof_accessor.cc
source/multigrid/mg_dof_handler.cc
MARKUS include/deal.II/lac/compressed_sparsity_pattern.h
WOLFGANG include/deal.II/lac/constraint_matrix.h
WOLFGANG include/deal.II/lac/constraint_matrix.templates.h
-include/deal.II/lac/eigen.h
-include/deal.II/lac/exceptions.h
-include/deal.II/lac/filtered_matrix.h
+BRUNO include/deal.II/lac/eigen.h
+BRUNO include/deal.II/lac/exceptions.h
+BRUNO include/deal.II/lac/filtered_matrix.h
include/deal.II/lac/full_matrix.h
include/deal.II/lac/full_matrix.templates.h
include/deal.II/lac/householder.h
include/deal.II/lac/sparse_vanka.templates.h
include/deal.II/lac/sparsity_pattern.h
include/deal.II/lac/sparsity_tools.h
-include/deal.II/lac/swappable_vector.h
-include/deal.II/lac/swappable_vector.templates.h
-include/deal.II/lac/transpose_matrix.h
-include/deal.II/lac/tridiagonal_matrix.h
-include/deal.II/lac/trilinos_block_sparse_matrix.h
-include/deal.II/lac/trilinos_block_vector.h
-include/deal.II/lac/trilinos_parallel_block_vector.h
-include/deal.II/lac/trilinos_precondition.h
-include/deal.II/lac/trilinos_solver.h
-include/deal.II/lac/trilinos_sparse_matrix.h
-include/deal.II/lac/trilinos_sparsity_pattern.h
-include/deal.II/lac/trilinos_vector_base.h
-include/deal.II/lac/trilinos_vector.h
-include/deal.II/lac/vector.h
-include/deal.II/lac/vector_memory.h
-include/deal.II/lac/vector.templates.h
-include/deal.II/lac/vector_view.h
+BRUNO include/deal.II/lac/swappable_vector.h
+BRUNO include/deal.II/lac/swappable_vector.templates.h
+BRUNO include/deal.II/lac/transpose_matrix.h
+BRUNO include/deal.II/lac/tridiagonal_matrix.h
+BRUNO include/deal.II/lac/trilinos_block_sparse_matrix.h
+BRUNO include/deal.II/lac/trilinos_block_vector.h
+BRUNO include/deal.II/lac/trilinos_parallel_block_vector.h
+BRUNO include/deal.II/lac/trilinos_precondition.h
+BRUNO include/deal.II/lac/trilinos_solver.h
+BRUNO include/deal.II/lac/trilinos_sparse_matrix.h
+BRUNO include/deal.II/lac/trilinos_sparsity_pattern.h
+BRUNO include/deal.II/lac/trilinos_vector_base.h
+BRUNO include/deal.II/lac/trilinos_vector.h
+BRUNO include/deal.II/lac/vector.h
+BRUNO include/deal.II/lac/vector_memory.h
+BRUNO include/deal.II/lac/vector.templates.h
+BRUNO include/deal.II/lac/vector_view.h
KAINAN include/deal.II/matrix_free/dof_info.h
KAINAN include/deal.II/matrix_free/dof_info.templates.h
BRUNO include/deal.II/matrix_free/fe_evaluation.h
class EigenPower : private Solver<VECTOR>
{
public:
+ /**
+ * Declare type of container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Standardized data struct to
* pipe additional data to the
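The size_type typedefs added throughout this patch let index variables track std::size_t instead of a hard-coded unsigned int, so containers with more than 2^32-1 entries index correctly on 64-bit builds. A minimal self-contained sketch of the pattern (illustration only, not part of the patch):

#include <cstddef>
#include <vector>

int main()
{
  typedef std::size_t size_type;   // mirrors the typedef added above

  std::vector<double> x(100);

  // the loop index now has the same width as x.size(), so there is
  // no narrowing and no 32-bit wrap-around for very large vectors
  for (size_type i = 0; i < x.size(); ++i)
    x[i] = 1.0 / double(i + 1);

  return 0;
}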
// do a little trick to compute the sign
// with not too much effect of round-off errors.
double entry = 0.;
- unsigned int i = 0;
+ size_type i = 0;
double thresh = length/x.size();
do
{
x.scale(1./length);
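The "little trick" in the hunk above scans for the first entry of x whose magnitude exceeds the threshold length/n; when length is the l2 norm of x, the maximum norm of x is at least length/n, so such an entry always exists, and its sign is a round-off-robust proxy for the sign of the dominant eigenvector component. A minimal sketch under those assumptions, with a hypothetical helper name and a plain std::vector standing in for the VECTOR template argument:

#include <cmath>
#include <cstddef>
#include <vector>

// returns +1. or -1. following the first entry of x whose magnitude
// exceeds length/n; 'length' is assumed to be the l2 norm of x
double stable_sign (const std::vector<double> &x, const double length)
{
  typedef std::size_t size_type;
  double entry = 0.;
  size_type i  = 0;
  const double thresh = length / x.size();
  do
    entry = x[i++];
  while (std::fabs(entry) < thresh);
  return (entry > 0.) ? 1. : -1.;
}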
// Main loop
- for (unsigned int iter=0; conv==SolverControl::iterate; iter++)
+ for (size_type iter=0; conv==SolverControl::iterate; iter++)
{
solver.solve (A_s, y, x, prec);
// do a little trick to compute the sign
// with not too much effect of round-off errors.
double entry = 0.;
- unsigned int i = 0;
+ size_type i = 0;
double thresh = length/x.size();
do
{
public:
class const_iterator;
+ /**
+ * Declare the type of container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Accessor class for iterators
*/
* pointer is sufficient.
*/
Accessor (const FilteredMatrix<VECTOR> *matrix,
- const unsigned int index);
+ const size_type index);
public:
/**
* represented by this
* object.
*/
- unsigned int row() const;
+ size_type row() const;
/**
* Column number of the
* element represented by
* this object.
*/
- unsigned int column() const;
+ size_type column() const;
/**
* Value of the right hand
/**
* Current row number.
*/
- unsigned int index;
+ size_type index;
/*
* Make enclosing class a
* friend.
* Constructor.
*/
const_iterator(const FilteredMatrix<VECTOR> *matrix,
- const unsigned int index);
+ const size_type index);
/**
* Prefix increment.
* freedom index and the value it
* shall have.
*/
- typedef std::pair<unsigned int, double> IndexValuePair;
+ typedef std::pair<size_type, double> IndexValuePair;
/**
* @name Constructors and initialization
* should have the value
* <tt>v</tt>.
*/
- void add_constraint (const unsigned int i, const double v);
+ void add_constraint (const size_type i, const double v);
/**
* Add a list of constraints to
inline
FilteredMatrix<VECTOR>::Accessor::Accessor(
const FilteredMatrix<VECTOR> *matrix,
- const unsigned int index)
+ const size_type index)
:
matrix(matrix),
index(index)
template<class VECTOR>
inline
-unsigned int
+typename FilteredMatrix<VECTOR>::size_type
FilteredMatrix<VECTOR>::Accessor::row() const
{
return matrix->constraints[index].first;
template<class VECTOR>
inline
-unsigned int
+typename FilteredMatrix<VECTOR>::size_type
FilteredMatrix<VECTOR>::Accessor::column() const
{
return matrix->constraints[index].first;
inline
FilteredMatrix<VECTOR>::const_iterator::const_iterator(
const FilteredMatrix<VECTOR> *matrix,
- const unsigned int index)
+ const size_type index)
:
accessor(matrix, index)
{}
template <class VECTOR>
inline
void
-FilteredMatrix<VECTOR>::add_constraint (const unsigned int index, const double value)
+FilteredMatrix<VECTOR>::add_constraint (const size_type index, const double value)
{
// add new constraint to end
constraints.push_back(IndexValuePair(index, value));
FilteredMatrix<VECTOR>::add_constraints (const ConstraintList &new_constraints)
{
// add new constraints to end
- const unsigned int old_size = constraints.size();
+ const size_type old_size = constraints.size();
constraints.reserve (old_size + new_constraints.size());
constraints.insert (constraints.end(),
new_constraints.begin(),
class FullMatrix : public Table<2,number>
{
public:
+ /**
+ * Declare type of container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Type of matrix entries. In analogy to
* the STL container classes.
* pointer is sufficient.
*/
Accessor (const FullMatrix<number> *matrix,
- const unsigned int row,
- const unsigned int col);
+ const size_type row,
+ const size_type col);
/**
* Row number of the element
* represented by this
* object.
*/
- unsigned int row() const;
+ size_type row() const;
/**
* Column number of the
* element represented by
* this object.
*/
- unsigned int column() const;
+ size_type column() const;
/**
* Value of this matrix entry.
/**
* Current row number.
*/
- unsigned int a_row;
+ size_type a_row;
/**
* Current column number.
* Constructor.
*/
const_iterator(const FullMatrix<number> *matrix,
- const unsigned int row,
- const unsigned int col);
+ const size_type row,
+ const size_type col);
/**
* Prefix increment.
* By default, no memory is
* allocated.
*/
- explicit FullMatrix (const unsigned int n = 0);
+ explicit FullMatrix (const size_type n = 0);
/**
* Constructor. Initialize the
* matrix as a rectangular
* matrix.
*/
- FullMatrix (const unsigned int rows,
- const unsigned int cols);
+ FullMatrix (const size_type rows,
+ const size_type cols);
/**
* Copy constructor. This
* is arranged line by line. No
* range checking is performed.
*/
- FullMatrix (const unsigned int rows,
- const unsigned int cols,
+ FullMatrix (const size_type rows,
+ const size_type cols,
const number *entries);
/**
template <int dim>
void
copy_from (const Tensor<2,dim> &T,
- const unsigned int src_r_i=0,
- const unsigned int src_r_j=dim-1,
- const unsigned int src_c_i=0,
- const unsigned int src_c_j=dim-1,
- const unsigned int dst_r=0,
- const unsigned int dst_c=0);
+ const size_type src_r_i=0,
+ const size_type src_r_j=dim-1,
+ const size_type src_c_i=0,
+ const size_type src_c_j=dim-1,
+ const size_type dst_r=0,
+ const size_type dst_c=0);
/**
* Insert a submatrix (also
template <int dim>
void
copy_to(Tensor<2,dim> &T,
- const unsigned int src_r_i=0,
- const unsigned int src_r_j=dim-1,
- const unsigned int src_c_i=0,
- const unsigned int src_c_j=dim-1,
- const unsigned int dst_r=0,
- const unsigned int dst_c=0) const;
+ const size_type src_r_i=0,
+ const size_type src_r_j=dim-1,
+ const size_type src_c_i=0,
+ const size_type src_c_j=dim-1,
+ const size_type dst_r=0,
+ const size_type dst_c=0) const;
/**
* Copy a subset of the rows and columns of another matrix into the
*/
template <typename MatrixType>
void extract_submatrix_from (const MatrixType &matrix,
- const std::vector<unsigned int> &row_index_set,
- const std::vector<unsigned int> &column_index_set);
+ const std::vector<size_type> &row_index_set,
+ const std::vector<size_type> &column_index_set);
/**
* Copy the elements of the current matrix object into a specified
*/
template <typename MatrixType>
void
- scatter_matrix_to (const std::vector<unsigned int> &row_index_set,
- const std::vector<unsigned int> &column_index_set,
+ scatter_matrix_to (const std::vector<size_type> &row_index_set,
+ const std::vector<size_type> &column_index_set,
MatrixType &matrix) const;
/**
*/
template<typename number2>
void fill (const FullMatrix<number2> &src,
- const unsigned int dst_offset_i = 0,
- const unsigned int dst_offset_j = 0,
- const unsigned int src_offset_i = 0,
- const unsigned int src_offset_j = 0);
+ const size_type dst_offset_i = 0,
+ const size_type dst_offset_j = 0,
+ const size_type src_offset_i = 0,
+ const size_type src_offset_j = 0);
/**
*/
template<typename number2>
void fill_permutation (const FullMatrix<number2> &src,
- const std::vector<unsigned int> &p_rows,
- const std::vector<unsigned int> &p_cols);
+ const std::vector<size_type> &p_rows,
+ const std::vector<size_type> &p_cols);
/**
* Set a particular entry of the matrix to a value. Thus, calling
* @param j The columns index of the element to be set.
* @param value The value to be written into the element.
*/
- void set (const unsigned int i,
- const unsigned int j,
+ void set (const size_type i,
+ const size_type j,
const number value);
/**
* @}
* To remember: this matrix is an
* <i>m x n</i>-matrix.
*/
- unsigned int m () const;
+ size_type m () const;
/**
* Number of columns of this matrix.
* To remember: this matrix is an
* <i>m x n</i>-matrix.
*/
- unsigned int n () const;
+ size_type n () const;
/**
* Return whether the matrix
* STL-like iterator with the
* first entry of row <tt>r</tt>.
*/
- const_iterator begin (const unsigned int r) const;
+ const_iterator begin (const size_type r) const;
/**
* Final iterator of row <tt>r</tt>.
*/
- const_iterator end (const unsigned int r) const;
+ const_iterator end (const size_type r) const;
//@}
///@name Modifying operators
template<typename number2>
void add (const FullMatrix<number2> &src,
const number factor,
- const unsigned int dst_offset_i = 0,
- const unsigned int dst_offset_j = 0,
- const unsigned int src_offset_i = 0,
- const unsigned int src_offset_j = 0);
+ const size_type dst_offset_i = 0,
+ const size_type dst_offset_j = 0,
+ const size_type src_offset_i = 0,
+ const size_type src_offset_j = 0);
/**
* Weighted addition of the
template<typename number2>
void Tadd (const FullMatrix<number2> &src,
const number factor,
- const unsigned int dst_offset_i = 0,
- const unsigned int dst_offset_j = 0,
- const unsigned int src_offset_i = 0,
- const unsigned int src_offset_j = 0);
+ const size_type dst_offset_i = 0,
+ const size_type dst_offset_j = 0,
+ const size_type src_offset_i = 0,
+ const size_type src_offset_j = 0);
/**
* Add a single element at the
* given position.
*/
- void add (const unsigned int row,
- const unsigned int column,
+ void add (const size_type row,
+ const size_type column,
const number value);
/**
* implementation.
*/
template <typename number2>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ void add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
const number2 *values,
const bool elide_zero_values = true,
const bool col_indices_are_sorted = false);
* s*A(j,1...n)</i>. Simple
* addition of rows of this
*/
- void add_row (const unsigned int i,
- const number s,
- const unsigned int j);
+ void add_row (const size_type i,
+ const number s,
+ const size_type j);
/**
* <i>A(i,1...n) += s*A(j,1...n) +
* t*A(k,1...n)</i>. Multiple
* addition of rows of this.
*/
- void add_row (const unsigned int i,
- const number s, const unsigned int j,
- const number t, const unsigned int k);
+ void add_row (const size_type i,
+ const number s, const size_type j,
+ const number t, const size_type k);
/**
* <i>A(1...n,i) += s*A(1...n,j)</i>.
* Simple addition of columns of this.
*/
- void add_col (const unsigned int i,
- const number s,
- const unsigned int j);
+ void add_col (const size_type i,
+ const number s,
+ const size_type j);
/**
* <i>A(1...n,i) += s*A(1...n,j) +
* t*A(1...n,k)</i>. Multiple
* addition of columns of this.
*/
- void add_col (const unsigned int i,
- const number s, const unsigned int j,
- const number t, const unsigned int k);
+ void add_col (const size_type i,
+ const number s, const size_type j,
+ const number t, const size_type k);
/**
* Swap <i>A(i,1...n) <->
* A(j,1...n)</i>. Swap rows i
* and j of this
*/
- void swap_row (const unsigned int i,
- const unsigned int j);
+ void swap_row (const size_type i,
+ const size_type j);
/**
* Swap <i>A(1...n,i) <->
* A(1...n,j)</i>. Swap columns
* i and j of this
*/
- void swap_col (const unsigned int i,
- const unsigned int j);
+  void swap_col (const size_type i,
+                 const size_type j);
/**
* Add constant to diagonal
* Exception
*/
DeclException3 (ExcInvalidDestination,
- int, int, int,
+ size_type, size_type, size_type,
<< "Target region not in matrix: size in this direction="
<< arg1 << ", size of new matrix=" << arg2
<< ", offset=" << arg3);
template <typename number>
inline
-unsigned int
+typename FullMatrix<number>::size_type
FullMatrix<number>::m() const
{
return this->n_rows();
template <typename number>
inline
-unsigned int
+typename FullMatrix<number>::size_type
FullMatrix<number>::n() const
{
return this->n_cols();
inline
void
FullMatrix<number>::extract_submatrix_from (const MatrixType &matrix,
- const std::vector<unsigned int> &row_index_set,
- const std::vector<unsigned int> &column_index_set)
+ const std::vector<size_type> &row_index_set,
+ const std::vector<size_type> &column_index_set)
{
AssertDimension(row_index_set.size(), this->n_rows());
AssertDimension(column_index_set.size(), this->n_cols());
- const unsigned int n_rows_submatrix = row_index_set.size();
- const unsigned int n_cols_submatrix = column_index_set.size();
+ const size_type n_rows_submatrix = row_index_set.size();
+ const size_type n_cols_submatrix = column_index_set.size();
- for (unsigned int sub_row = 0; sub_row < n_rows_submatrix; ++sub_row)
- for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col)
+ for (size_type sub_row = 0; sub_row < n_rows_submatrix; ++sub_row)
+ for (size_type sub_col = 0; sub_col < n_cols_submatrix; ++sub_col)
(*this)(sub_row, sub_col) = matrix.el(row_index_set[sub_row], column_index_set[sub_col]);
}
template <typename MatrixType>
inline
void
-FullMatrix<number>::scatter_matrix_to (const std::vector<unsigned int> &row_index_set,
- const std::vector<unsigned int> &column_index_set,
+FullMatrix<number>::scatter_matrix_to (const std::vector<size_type> &row_index_set,
+ const std::vector<size_type> &column_index_set,
MatrixType &matrix) const
{
AssertDimension(row_index_set.size(), this->n_rows());
AssertDimension(column_index_set.size(), this->n_cols());
- const unsigned int n_rows_submatrix = row_index_set.size();
- const unsigned int n_cols_submatrix = column_index_set.size();
+ const size_type n_rows_submatrix = row_index_set.size();
+ const size_type n_cols_submatrix = column_index_set.size();
- for (unsigned int sub_row = 0; sub_row < n_rows_submatrix; ++sub_row)
- for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col)
+ for (size_type sub_row = 0; sub_row < n_rows_submatrix; ++sub_row)
+ for (size_type sub_col = 0; sub_col < n_cols_submatrix; ++sub_col)
matrix.set(row_index_set[sub_row],
column_index_set[sub_col],
(*this)(sub_row, sub_col));
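A hedged usage sketch for the extract_submatrix_from / scatter_matrix_to pair defined above; the matrix sizes and index values are made up for illustration:

// gather rows {0,2} and columns {1,3} of the 4x4 matrix A into the
// 2x2 matrix B, modify B, then scatter the result back into A
FullMatrix<double> A(4,4), B(2,2);
std::vector<FullMatrix<double>::size_type> rows(2), cols(2);
rows[0] = 0; rows[1] = 2;
cols[0] = 1; cols[1] = 3;

B.extract_submatrix_from (A, rows, cols);  // B(i,j) = A(rows[i], cols[j])
B(0,0) += 1.;
B.scatter_matrix_to (rows, cols, A);       // A(rows[i], cols[j]) = B(i,j)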
template <typename number>
inline
void
-FullMatrix<number>::set (const unsigned int i,
- const unsigned int j,
+FullMatrix<number>::set (const size_type i,
+ const size_type j,
const number value)
{
(*this)(i,j) = value;
inline
FullMatrix<number>::Accessor::
Accessor (const FullMatrix<number> *matrix,
- const unsigned int r,
- const unsigned int c)
+ const size_type r,
+ const size_type c)
:
matrix(matrix),
a_row(r),
template <typename number>
inline
-unsigned int
+typename FullMatrix<number>::size_type
FullMatrix<number>::Accessor::row() const
{
return a_row;
template <typename number>
inline
-unsigned int
+typename FullMatrix<number>::size_type
FullMatrix<number>::Accessor::column() const
{
return a_col;
inline
FullMatrix<number>::const_iterator::
const_iterator(const FullMatrix<number> *matrix,
- const unsigned int r,
- const unsigned int c)
+ const size_type r,
+ const size_type c)
:
accessor(matrix, r, c)
{}
template <typename number>
inline
typename FullMatrix<number>::const_iterator
-FullMatrix<number>::begin (const unsigned int r) const
+FullMatrix<number>::begin (const size_type r) const
{
Assert (r<m(), ExcIndexRange(r,0,m()));
return const_iterator(this, r, 0);
template <typename number>
inline
typename FullMatrix<number>::const_iterator
-FullMatrix<number>::end (const unsigned int r) const
+FullMatrix<number>::end (const size_type r) const
{
Assert (r<m(), ExcIndexRange(r,0,m()));
return const_iterator(this, r+1, 0);
template <typename number>
inline
void
-FullMatrix<number>::add (const unsigned int r, const unsigned int c, const number v)
+FullMatrix<number>::add (const size_type r, const size_type c, const number v)
{
AssertIndexRange(r, this->m());
AssertIndexRange(c, this->n());
template <typename number2>
inline
void
-FullMatrix<number>::add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
+FullMatrix<number>::add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number2 *values,
const bool,
const bool)
{
AssertIndexRange(row, this->m());
- for (unsigned int col=0; col<n_cols; ++col)
+ for (size_type col=0; col<n_cols; ++col)
{
AssertIndexRange(col_indices[col], this->n());
this->operator()(row,col_indices[col]) += values[col];
template <class STREAM>
inline
void
-FullMatrix<number>::print (STREAM &s,
- const unsigned int w,
- const unsigned int p) const
+FullMatrix<number>::print (STREAM &s,
+ const size_type w,
+ const size_type p) const
{
Assert (!this->empty(), ExcEmptyMatrix());
- for (unsigned int i=0; i<this->m(); ++i)
+ for (size_type i=0; i<this->m(); ++i)
{
- for (unsigned int j=0; j<this->n(); ++j)
+ for (size_type j=0; j<this->n(); ++j)
s << std::setw(w) << std::setprecision(p) << this->el(i,j);
s << std::endl;
}
class TridiagonalMatrix
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* @name Constructors and initalization.
*/
* empty matrix of dimension
* <tt>n</tt>.
*/
- TridiagonalMatrix(unsigned int n = 0,
+ TridiagonalMatrix(size_type n = 0,
bool symmetric = false);
/**
* to zero. The symmetry
* properties may be set as well.
*/
- void reinit(unsigned int n,
+ void reinit(size_type n,
bool symmetric = false);
* To remember: this matrix is an
* <i>m x m</i>-matrix.
*/
- unsigned int m () const;
+ size_type m () const;
/**
* Number of columns of this matrix.
* To remember: this matrix is an
* <i>n x n</i>-matrix.
*/
- unsigned int n () const;
+ size_type n () const;
/**
* Return whether the matrix
* the case where <i>|i-j| <=
* 1</i>.
*/
- number operator()(unsigned int i, unsigned int j) const;
+ number operator()(size_type i, size_type j) const;
/**
* Read-write access to a
* for matrix assembling in order
* not to obtain doubled entries.
*/
- number &operator()(unsigned int i, unsigned int j);
+ number &operator()(size_type i, size_type j);
//@}
///@name Multiplications with vectors
* compute_eigenvalues(), you can
* access each eigenvalue here.
*/
- number eigenvalue(const unsigned int i) const;
+ number eigenvalue(const size_type i) const;
//@}
///@name Miscellanea
//@{
#ifndef DOXYGEN
template<typename number>
-unsigned int
+typename TridiagonalMatrix<number>::size_type
TridiagonalMatrix<number>::m() const
{
return diagonal.size();
template<typename number>
-unsigned int
+typename TridiagonalMatrix<number>::size_type
TridiagonalMatrix<number>::n() const
{
return diagonal.size();
template<typename number>
inline
number
-TridiagonalMatrix<number>::operator()(unsigned int i, unsigned int j) const
+TridiagonalMatrix<number>::operator()(size_type i, size_type j) const
{
Assert(i<n(), ExcIndexRange(i,0,n()));
Assert(j<n(), ExcIndexRange(j,0,n()));
template<typename number>
inline
number &
-TridiagonalMatrix<number>::operator()(unsigned int i, unsigned int j)
+TridiagonalMatrix<number>::operator()(size_type i, size_type j)
{
Assert(i<n(), ExcIndexRange(i,0,n()));
Assert(j<n(), ExcIndexRange(j,0,n()));
const unsigned int width,
const unsigned int) const
{
- for (unsigned int i=0; i<n(); ++i)
+ for (size_type i=0; i<n(); ++i)
{
if (i>0)
s << std::setw(width) << (*this)(i,i-1);
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare the type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare the type of integer.
+ */
+ typedef long long int_type;
+#endif
+
/**
* Typedef the base class for simpler
* access to its own typedefs.
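The int_type typedef above mirrors, as far as this patch assumes, Epetra's compile-time choice of global index width: 32-bit global indices when EPETRA_NO_64BIT_GLOBAL_INDICES is defined, 64-bit ones otherwise. The selection pattern in isolation:

#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
typedef int int_type;        // Epetra built with 32-bit global indices
#else
typedef long long int_type;  // Epetra built with 64-bit global indices
#endif

// any value handed to Epetra as a global ID is first brought to the
// width Epetra was configured with, e.g.
// const int_type global_row = static_cast<int_type>(row);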
* user call whatever function
* she desires.
*/
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
+ void reinit (const size_type n_block_rows,
+ const size_type n_block_columns);
/**
* Resize the matrix, by using an
* elements of this
* matrix.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Matrix-vector multiplication:
{
Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
this->block(r,c) = d;
return *this;
BlockSparseMatrix::is_compressed () const
{
bool compressed = true;
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
if (block(row, col).is_compressed() == false)
{
compressed = false;
class BlockVector : public BlockVectorBase<Vector>
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Typedef the base class for simpler
* access to its own typedefs.
* fill appropriate data using a
* reinit of the blocks.
*/
- BlockVector (const unsigned int num_blocks);
+ BlockVector (const size_type num_blocks);
/**
* Constructor. Set the number of
*
* References BlockVector.reinit().
*/
- BlockVector (const std::vector<unsigned int> &N);
+ BlockVector (const std::vector<size_type> &N);
/**
* Constructor. Set the number of
* different blocks.
*/
template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
+ BlockVector (const std::vector<size_type> &n,
+ const InputIterator first,
+ const InputIterator end);
/**
* Destructor. Clears memory
* If <tt>fast==false</tt>, the vector
* is filled with zeros.
*/
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
+ void reinit (const std::vector<size_type> &N,
+ const bool fast=false);
/**
* Reinit the function
* calls <tt>collect_sizes</tt>
* afterwards.
*/
- void reinit (const unsigned int num_blocks);
+ void reinit (const size_type num_blocks);
/**
* Swap the contents of this
inline
- BlockVector::BlockVector (const std::vector<unsigned int> &N)
+ BlockVector::BlockVector (const std::vector<size_type> &N)
{
reinit (N);
}
template <typename InputIterator>
- BlockVector::BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end)
+ BlockVector::BlockVector (const std::vector<size_type> &n,
+ const InputIterator first,
+ const InputIterator end)
{
// first set sizes of blocks, but
// don't initialize them as we will
// copy elements soon
reinit (n, true);
InputIterator start = first;
- for (unsigned int b=0; b<n.size(); ++b)
+ for (size_type b=0; b<n.size(); ++b)
{
InputIterator end = start;
- std::advance (end, static_cast<signed int>(n[b]));
+ std::advance (end, static_cast<size_type>(n[b]));
- for (unsigned int i=0; i<n[b]; ++i, ++start)
+ for (size_type i=0; i<n[b]; ++i, ++start)
this->block(b)(i) = *start;
}
Assert (start == end, ExcIteratorRangeDoesNotMatchVectorSize());
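The constructor above splits one flat iterator range into consecutive blocks of the sizes listed in n. The same pattern in a self-contained sketch, with a hypothetical helper name and plain std::vector blocks:

#include <cstddef>
#include <vector>

typedef std::size_t size_type;

// copy n[0] + n[1] + ... elements starting at 'first' into per-block
// vectors, advancing the iterator block by block
template <typename InputIterator>
std::vector<std::vector<double> >
split_into_blocks (const std::vector<size_type> &n, InputIterator first)
{
  std::vector<std::vector<double> > blocks (n.size());
  for (size_type b = 0; b < n.size(); ++b)
    for (size_type i = 0; i < n[b]; ++i, ++first)
      blocks[b].push_back (*first);
  return blocks;
}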
inline
- BlockVector::BlockVector (const unsigned int num_blocks)
+ BlockVector::BlockVector (const size_type num_blocks)
{
reinit (num_blocks);
}
this->components.resize (v.n_blocks());
this->block_indices = v.block_indices;
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.components[i];
}
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(),v.n_blocks()));
- for (unsigned int row=0; row<n_blocks(); ++row)
+ for (size_type row=0; row<n_blocks(); ++row)
block(row).swap (v.block(row));
}
{
if (n_blocks() != v.n_blocks())
{
- std::vector<unsigned int> block_sizes (v.n_blocks(), 0);
+ std::vector<size_type> block_sizes (v.n_blocks(), 0);
block_indices.reinit (block_sizes);
if (components.size() != n_blocks())
components.resize(n_blocks());
}
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.block(i);
collect_sizes();
class BlockVector : public BlockVectorBase<Vector>
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Typedef the base class for simpler
* access to its own typedefs.
* fill appropriate data using a
* reinit of the blocks.
*/
- BlockVector (const unsigned int num_blocks);
+ BlockVector (const size_type num_blocks);
/**
* Destructor. Clears memory
* calls <tt>collect_sizes</tt>
* afterwards.
*/
- void reinit (const unsigned int num_blocks);
+ void reinit (const size_type num_blocks);
/**
* This reinit function is meant to
inline
- BlockVector::BlockVector (const unsigned int num_blocks)
+ BlockVector::BlockVector (const size_type num_blocks)
{
reinit (num_blocks);
}
this->components.resize (v.n_blocks());
this->block_indices = v.block_indices;
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.components[i];
}
BlockVector::is_compressed () const
{
bool compressed = true;
- for (unsigned int row=0; row<n_blocks(); ++row)
+ for (size_type row=0; row<n_blocks(); ++row)
if (block(row).is_compressed() == false)
{
compressed = false;
{
if (n_blocks() != v.n_blocks())
{
- std::vector<unsigned int> block_sizes (v.n_blocks(), 0);
+ std::vector<size_type> block_sizes (v.n_blocks(), 0);
block_indices.reinit (block_sizes);
if (components.size() != n_blocks())
components.resize(n_blocks());
}
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.block(i);
collect_sizes();
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(),v.n_blocks()));
- for (unsigned int row=0; row<n_blocks(); ++row)
+ for (size_type row=0; row<n_blocks(); ++row)
block(row).swap (v.block(row));
}
class PreconditionBase : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
/**
* Standardized data struct to
* Prints an estimate of the memory
* consumption of this class.
*/
- std::size_t memory_consumption () const;
+ size_type memory_consumption () const;
private:
/**
void PreconditionBase::vmult (dealii::Vector<double> &dst,
const dealii::Vector<double> &src) const
{
- AssertDimension (static_cast<int>(dst.size()),
+ AssertDimension (static_cast<int_type>(dst.size()),
preconditioner->OperatorDomainMap().NumMyElements());
- AssertDimension (static_cast<int>(src.size()),
+ AssertDimension (static_cast<int_type>(src.size()),
preconditioner->OperatorRangeMap().NumMyElements());
Epetra_Vector tril_dst (View, preconditioner->OperatorDomainMap(),
dst.begin());
void PreconditionBase::Tvmult (dealii::Vector<double> &dst,
const dealii::Vector<double> &src) const
{
- AssertDimension (static_cast<int>(dst.size()),
+ AssertDimension (static_cast<int_type>(dst.size()),
preconditioner->OperatorDomainMap().NumMyElements());
- AssertDimension (static_cast<int>(src.size()),
+ AssertDimension (static_cast<int_type>(src.size()),
preconditioner->OperatorRangeMap().NumMyElements());
Epetra_Vector tril_dst (View, preconditioner->OperatorDomainMap(),
dst.begin());
PreconditionBase::vmult (parallel::distributed::Vector<double> &dst,
const parallel::distributed::Vector<double> &src) const
{
- AssertDimension (static_cast<int>(dst.local_size()),
+ AssertDimension (static_cast<int_type>(dst.local_size()),
preconditioner->OperatorDomainMap().NumMyElements());
- AssertDimension (static_cast<int>(src.local_size()),
+ AssertDimension (static_cast<int_type>(src.local_size()),
preconditioner->OperatorRangeMap().NumMyElements());
Epetra_Vector tril_dst (View, preconditioner->OperatorDomainMap(),
dst.begin());
PreconditionBase::Tvmult (parallel::distributed::Vector<double> &dst,
const parallel::distributed::Vector<double> &src) const
{
- AssertDimension (static_cast<int>(dst.local_size()),
+ AssertDimension (static_cast<int_type>(dst.local_size()),
preconditioner->OperatorDomainMap().NumMyElements());
- AssertDimension (static_cast<int>(src.local_size()),
+ AssertDimension (static_cast<int_type>(src.local_size()),
preconditioner->OperatorRangeMap().NumMyElements());
Epetra_Vector tril_dst (View, preconditioner->OperatorDomainMap(),
dst.begin());
* Exception
*/
DeclException3 (ExcAccessToNonlocalRow,
- int, int, int,
+ std::size_t, std::size_t, std::size_t,
<< "You tried to access row " << arg1
<< " of a distributed sparsity pattern, "
<< " but only rows " << arg2 << " through " << arg3
class AccessorBase
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
+
/**
* Constructor.
*/
AccessorBase (SparseMatrix *matrix,
- const unsigned int row,
- const unsigned int index);
+ const size_type row,
+ const size_type index);
/**
* Row number of the element
* represented by this object.
*/
- unsigned int row() const;
+ size_type row() const;
/**
* Index in row of the element
* represented by this object.
*/
- unsigned int index() const;
+ size_type index() const;
/**
* Column number of the element
* represented by this object.
*/
- unsigned int column() const;
+ size_type column() const;
protected:
/**
/**
* Current row number.
*/
- unsigned int a_row;
+ size_type a_row;
/**
* Current index in row.
*/
- unsigned int a_index;
+ size_type a_index;
/**
* Discard the old row caches
* than one accessor can access
* this data if necessary.
*/
- std_cxx1x::shared_ptr<std::vector<unsigned int> > colnum_cache;
+ std_cxx1x::shared_ptr<std::vector<size_type> > colnum_cache;
/**
* Cache for the values
* pointer is sufficient.
*/
Accessor (MatrixType *matrix,
- const unsigned int row,
- const unsigned int index);
+ const size_type row,
+ const size_type index);
/**
* Copy constructor to get from a
* pointer is sufficient.
*/
Accessor (MatrixType *matrix,
- const unsigned int row,
- const unsigned int index);
+ const size_type row,
+ const size_type index);
/**
* Value of this matrix entry.
class Iterator
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Typedef for the matrix type
* (including constness) we are to
* the index within it.
*/
Iterator (MatrixType *matrix,
- const unsigned int row,
- const unsigned int index);
+ const size_type row,
+ const size_type index);
/**
* Copy constructor with
* Exception
*/
DeclException2 (ExcInvalidIndexWithinRow,
- int, int,
+ size_type, size_type,
<< "Attempt to access element " << arg2
<< " of row " << arg1
<< " which doesn't have that many elements.");
* row is specified as the maximum
* number of entries argument.
*/
- SparseMatrix (const unsigned int m,
- const unsigned int n,
- const unsigned int n_max_entries_per_row);
+ SparseMatrix (const size_type m,
+ const size_type n,
+ const size_type n_max_entries_per_row);
/**
* Generate a matrix that is completely
* specifies the number of entries in
* each row.
*/
- SparseMatrix (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row);
+ SparseMatrix (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Generate a matrix from a Trilinos
* memory prior to use (in the
* compress() step).
*/
-  SparseMatrix (const Epetra_Map  &parallel_partitioning,
- const unsigned int n_max_entries_per_row = 0);
+  SparseMatrix (const Epetra_Map  &parallel_partitioning,
+ const size_type n_max_entries_per_row = 0);
/**
* Same as before, but now set a
* SparseMatrix::reinit call
* considerably faster.
*/
-  SparseMatrix (const Epetra_Map                &parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
+  SparseMatrix (const Epetra_Map                &parallel_partitioning,
+ const std::vector<size_type> &n_entries_per_row);
/**
* This constructor is similar to the
* number of columns entries per row
* that will be allocated.
*/
- SparseMatrix (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const unsigned int n_max_entries_per_row = 0);
+ SparseMatrix (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const size_type n_max_entries_per_row = 0);
/**
* This constructor is similar to the
*/
SparseMatrix (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
+ const std::vector<size_type> &n_entries_per_row);
/**
* This function is initializes the
* memory prior to use (in the
* compress() step).
*/
-  SparseMatrix (const IndexSet     &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_max_entries_per_row = 0);
+  SparseMatrix (const IndexSet     &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const size_type n_max_entries_per_row = 0);
/**
* Same as before, but now set the
* SparseMatrix::reinit call
* considerably faster.
*/
-  SparseMatrix (const IndexSet                  &parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
+  SparseMatrix (const IndexSet                  &parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<size_type> &n_entries_per_row);
/**
* This constructor is similar to the
* structure is reorganized in the
* compress() call.
*/
- SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_max_entries_per_row = 0);
+ SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const size_type n_max_entries_per_row = 0);
/**
* This constructor is similar to the
* each row of the newly generated
* matrix.
*/
- SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
+ SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<size_type> &n_entries_per_row);
/**
* This function is initializes the
* Return the number of rows in
* this matrix.
*/
- unsigned int m () const;
+ size_type m () const;
/**
* Return the number of columns
* in this matrix.
*/
- unsigned int n () const;
+ size_type n () const;
/**
* Return the local dimension
* exactly are stored locally,
* use local_range().
*/
- unsigned int local_size () const;
+ size_type local_size () const;
/**
* Return a pair of indices
* where
* <tt>n=local_size()</tt>.
*/
- std::pair<unsigned int, unsigned int>
+ std::pair<size_type, size_type>
local_range () const;
/**
* in the local range or not,
* see also local_range().
*/
- bool in_local_range (const unsigned int index) const;
+ bool in_local_range (const size_type index) const;
/**
* Return the number of nonzero
* elements of this matrix.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Number of entries in a
* specific row.
*/
- unsigned int row_length (const unsigned int row) const;
+ size_type row_length (const size_type row) const;
/**
* Returns the state of the matrix,
* returned in case this is called in
* an MPI-based program.
*/
- std::size_t memory_consumption () const;
+ size_type memory_consumption () const;
//@}
/**
* the matrix with a sparsity pattern
* first.
*/
- void set (const unsigned int i,
- const unsigned int j,
+ void set (const size_type i,
+ const size_type j,
const TrilinosScalar value);
/**
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
- void set (const std::vector<unsigned int> &indices,
+ void set (const std::vector<size_type> &indices,
const FullMatrix<TrilinosScalar> &full_matrix,
const bool elide_zero_values = false);
* different local-to-global indexing
* on rows and columns, respectively.
*/
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
+ void set (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
const FullMatrix<TrilinosScalar> &full_matrix,
const bool elide_zero_values = false);
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ void set (const size_type row,
+ const std::vector<size_type> &col_indices,
const std::vector<TrilinosScalar> &values,
const bool elide_zero_values = false);
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ void set (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
const TrilinosScalar *values,
const bool elide_zero_values = false);
* <tt>value</tt> is not a finite
* number an exception is thrown.
*/
- void add (const unsigned int i,
- const unsigned int j,
+ void add (const size_type i,
+ const size_type j,
const TrilinosScalar value);
/**
* i.e., zero values won't be added
* into the matrix.
*/
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const FullMatrix<TrilinosScalar> &full_matrix,
const bool elide_zero_values = true);
* different local-to-global indexing
* on rows and columns, respectively.
*/
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
+ void add (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
const FullMatrix<TrilinosScalar> &full_matrix,
const bool elide_zero_values = true);
* i.e., zero values won't be added
* into the matrix.
*/
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ void add (const size_type row,
+ const std::vector<size_type> &col_indices,
const std::vector<TrilinosScalar> &values,
const bool elide_zero_values = true);
* i.e., zero values won't be added
* into the matrix.
*/
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ void add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
const TrilinosScalar *values,
const bool elide_zero_values = true,
const bool col_indices_are_sorted = false);
* default is to set it to
* zero.
*/
- void clear_row (const unsigned int row,
+ void clear_row (const size_type row,
const TrilinosScalar new_diag_value = 0);
/**
* diagonal entries, you have
* to set them by hand.
*/
- void clear_rows (const std::vector<unsigned int> &rows,
- const TrilinosScalar new_diag_value = 0);
+ void clear_rows (const std::vector<size_type> &rows,
+ const TrilinosScalar new_diag_value = 0);
/**
* Make an in-place transpose
* is not saved on the calling
* process.
*/
- TrilinosScalar operator () (const unsigned int i,
- const unsigned int j) const;
+ TrilinosScalar operator () (const size_type i,
+ const size_type j) const;
/**
* Return the value of the
* processor, and possibly so
* with a nonzero value.
*/
- TrilinosScalar el (const unsigned int i,
- const unsigned int j) const;
+ TrilinosScalar el (const size_type i,
+ const size_type j) const;
/**
* Return the main diagonal
* See also the comment in
* trilinos_sparse_matrix.cc.
*/
- TrilinosScalar diag_element (const unsigned int i) const;
+ TrilinosScalar diag_element (const size_type i) const;
//@}
/**
* that the iterator may not be
* dereferencable in that case.
*/
- const_iterator begin (const unsigned int r) const;
+ const_iterator begin (const size_type r) const;
/**
* Final iterator of row
* the end iterator for the
* last row of a matrix.
*/
- const_iterator end (const unsigned int r) const;
+ const_iterator end (const size_type r) const;
/**
* STL-like iterator with the
* that the iterator may not be
* dereferencable in that case.
*/
- iterator begin (const unsigned int r);
+ iterator begin (const size_type r);
/**
* Final iterator of row
* the end iterator for the
* last row of a matrix.
*/
- iterator end (const unsigned int r);
+ iterator end (const size_type r);
//@}
/**
* Exception
*/
DeclException2 (ExcInvalidIndex,
- int, int,
+ size_type, size_type,
<< "The entry with index <" << arg1 << ',' << arg2
<< "> does not exist.");
* Exception
*/
DeclException4 (ExcAccessToNonLocalElement,
- int, int, int, int,
+ size_type, size_type, size_type, size_type,
<< "You tried to access element (" << arg1
<< "/" << arg2 << ")"
<< " of a distributed matrix, but only rows "
* Exception
*/
DeclException2 (ExcAccessToNonPresentElement,
- int, int,
+ size_type, size_type,
<< "You tried to access element (" << arg1
<< "/" << arg2 << ")"
<< " of a sparse matrix, but it appears to not"
* adding/inserting local data into
* the (large) sparse matrix.
*/
- std::vector<unsigned int> column_indices;
+ std::vector<size_type> column_indices;
/**
* An internal array of double values
namespace SparseMatrixIterators
{
inline
- AccessorBase::AccessorBase(SparseMatrix *matrix, unsigned int row, unsigned int index)
+ AccessorBase::AccessorBase(SparseMatrix *matrix, size_type row, size_type index)
:
matrix(matrix),
a_row(row),
inline
- unsigned int
+  AccessorBase::size_type
AccessorBase::row() const
{
Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
inline
- unsigned int
+  AccessorBase::size_type
AccessorBase::column() const
{
Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
inline
- unsigned int
+  AccessorBase::size_type
AccessorBase::index() const
{
Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
inline
Accessor<true>::Accessor (MatrixType *matrix,
- const unsigned int row,
- const unsigned int index)
+ const size_type row,
+ const size_type index)
:
AccessorBase(const_cast<SparseMatrix *>(matrix), row, index)
{}
inline
Accessor<false>::Accessor (MatrixType *matrix,
- const unsigned int row,
- const unsigned int index)
+ const size_type row,
+ const size_type index)
:
AccessorBase(matrix, row, index)
{}
template <bool Constness>
inline
Iterator<Constness>::Iterator(MatrixType *matrix,
- const unsigned int row,
- const unsigned int index)
+ const size_type row,
+ const size_type index)
:
accessor(matrix, row, index)
{}
inline
SparseMatrix::const_iterator
- SparseMatrix::begin(const unsigned int r) const
+ SparseMatrix::begin(const size_type r) const
{
Assert (r < m(), ExcIndexRange(r, 0, m()));
if (row_length(r) > 0)
inline
SparseMatrix::const_iterator
- SparseMatrix::end(const unsigned int r) const
+ SparseMatrix::end(const size_type r) const
{
Assert (r < m(), ExcIndexRange(r, 0, m()));
// place the iterator on the first entry
// past this line, or at the end of the
// matrix
- for (unsigned int i=r+1; i<m(); ++i)
+ for (size_type i=r+1; i<m(); ++i)
if (row_length(i) > 0)
return const_iterator(this, i, 0);
inline
SparseMatrix::iterator
- SparseMatrix::begin(const unsigned int r)
+ SparseMatrix::begin(const size_type r)
{
Assert (r < m(), ExcIndexRange(r, 0, m()));
if (row_length(r) > 0)
inline
SparseMatrix::iterator
- SparseMatrix::end(const unsigned int r)
+ SparseMatrix::end(const size_type r)
{
Assert (r < m(), ExcIndexRange(r, 0, m()));
// place the iterator on the first entry
// past this line, or at the end of the
// matrix
- for (unsigned int i=r+1; i<m(); ++i)
+ for (size_type i=r+1; i<m(); ++i)
if (row_length(i) > 0)
return iterator(this, i, 0);
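A hedged usage sketch for the row-wise iterators above, assuming the usual deal.II accessor interface with value() and a row index r that lies in the local range; note that end(r) deliberately points at the first entry of the next non-empty row, so the loop stays correct even if row r+1 holds no entries:

// sum the locally stored entries of row r of a SparseMatrix A
TrilinosScalar sum = 0;
for (SparseMatrix::const_iterator p = A.begin(r); p != A.end(r); ++p)
  sum += p->value();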
inline
bool
- SparseMatrix::in_local_range (const unsigned int index) const
+ SparseMatrix::in_local_range (const size_type index) const
{
- int begin, end;
+ int_type begin, end;
begin = matrix->RowMap().MinMyGID();
end = matrix->RowMap().MaxMyGID()+1;
- return ((index >= static_cast<unsigned int>(begin)) &&
- (index < static_cast<unsigned int>(end)));
+ return ((index >= static_cast<size_type>(begin)) &&
+ (index < static_cast<size_type>(end)));
}
// compile time.
inline
void
- SparseMatrix::set (const unsigned int i,
- const unsigned int j,
+ SparseMatrix::set (const size_type i,
+ const size_type j,
const TrilinosScalar value)
{
inline
void
- SparseMatrix::set (const std::vector<unsigned int> &indices,
+ SparseMatrix::set (const std::vector<size_type> &indices,
const FullMatrix<TrilinosScalar> &values,
const bool elide_zero_values)
{
ExcDimensionMismatch(indices.size(), values.m()));
Assert (values.m() == values.n(), ExcNotQuadratic());
- for (unsigned int i=0; i<indices.size(); ++i)
+ for (size_type i=0; i<indices.size(); ++i)
set (indices[i], indices.size(), &indices[0], &values(i,0),
elide_zero_values);
}
inline
void
- SparseMatrix::set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
+ SparseMatrix::set (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
const FullMatrix<TrilinosScalar> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.n(),
ExcDimensionMismatch(col_indices.size(), values.n()));
- for (unsigned int i=0; i<row_indices.size(); ++i)
+ for (size_type i=0; i<row_indices.size(); ++i)
set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
elide_zero_values);
}
inline
void
- SparseMatrix::set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ SparseMatrix::set (const size_type row,
+ const std::vector<size_type> &col_indices,
const std::vector<TrilinosScalar> &values,
const bool elide_zero_values)
{
inline
void
- SparseMatrix::set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ SparseMatrix::set (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
const TrilinosScalar *values,
const bool elide_zero_values)
{
last_action = Insert;
- int *col_index_ptr;
+ int_type *col_index_ptr;
TrilinosScalar const *col_value_ptr;
- int n_columns;
+ int_type n_columns;
// If we don't elide zeros, the pointers
// are already available...
if (elide_zero_values == false)
{
- col_index_ptr = (int *)col_indices;
+ col_index_ptr = (int_type *)col_indices;
col_value_ptr = values;
n_columns = n_cols;
}
}
n_columns = 0;
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
{
const double value = values[j];
Assert (numbers::is_finite(value), ExcNumberNotFinite());
}
}
- Assert(n_columns <= (int)n_cols, ExcInternalError());
+ Assert(n_columns <= (int_type)n_cols, ExcInternalError());
- col_index_ptr = (int *)&column_indices[0];
+ col_index_ptr = (int_type *)&column_indices[0];
col_value_ptr = &column_values[0];
}
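What the elide_zero_values branch above does, in isolation: compact the (index, value) pairs so that only nonzero values are handed on to Epetra. A minimal sketch with hypothetical local scratch arrays column_indices / column_values:

// inputs:  col_indices[0..n_cols) and values[0..n_cols), zeros allowed
// outputs: the first n_columns entries of column_indices / column_values
//          hold only the nonzero pairs, already converted to int_type
int_type n_columns = 0;
for (size_type j = 0; j < n_cols; ++j)
  if (values[j] != 0)
    {
      column_indices[n_columns] = static_cast<int_type>(col_indices[j]);
      column_values[n_columns]  = values[j];
      ++n_columns;
    }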
// add the possibility to insert new values,
// and in the second we just replace
// data.
- if (row_partitioner().MyGID(static_cast<int>(row)) == true)
+ if (row_partitioner().MyGID(static_cast<int_type>(row)) == true)
{
if (matrix->Filled() == false)
{
if (matrix->Filled() == false)
{
- ierr = matrix->InsertGlobalValues (1, (int *)&row,
+ ierr = matrix->InsertGlobalValues (1, (int_type *)&row,
n_columns, col_index_ptr,
&col_value_ptr,
Epetra_FECrsMatrix::ROW_MAJOR);
ierr = 0;
}
else
- ierr = matrix->ReplaceGlobalValues (1, (int *)&row,
+ ierr = matrix->ReplaceGlobalValues (1, (int_type *)&row,
n_columns, col_index_ptr,
&col_value_ptr,
Epetra_FECrsMatrix::ROW_MAJOR);
inline
void
- SparseMatrix::add (const unsigned int i,
- const unsigned int j,
+ SparseMatrix::add (const size_type i,
+ const size_type j,
const TrilinosScalar value)
{
Assert (numbers::is_finite(value), ExcNumberNotFinite());
inline
void
- SparseMatrix::add (const std::vector<unsigned int> &indices,
+ SparseMatrix::add (const std::vector<size_type> &indices,
const FullMatrix<TrilinosScalar> &values,
const bool elide_zero_values)
{
ExcDimensionMismatch(indices.size(), values.m()));
Assert (values.m() == values.n(), ExcNotQuadratic());
- for (unsigned int i=0; i<indices.size(); ++i)
+ for (size_type i=0; i<indices.size(); ++i)
add (indices[i], indices.size(), &indices[0], &values(i,0),
elide_zero_values);
}
inline
void
- SparseMatrix::add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
+ SparseMatrix::add (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
const FullMatrix<TrilinosScalar> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.n(),
ExcDimensionMismatch(col_indices.size(), values.n()));
- for (unsigned int i=0; i<row_indices.size(); ++i)
+ for (size_type i=0; i<row_indices.size(); ++i)
add (row_indices[i], col_indices.size(), &col_indices[0],
&values(i,0), elide_zero_values);
}
inline
void
- SparseMatrix::add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ SparseMatrix::add (const size_type row,
+ const std::vector<size_type> &col_indices,
const std::vector<TrilinosScalar> &values,
const bool elide_zero_values)
{
inline
void
- SparseMatrix::add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ SparseMatrix::add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
const TrilinosScalar *values,
const bool elide_zero_values,
const bool /*col_indices_are_sorted*/)
last_action = Add;
- int *col_index_ptr;
+ int_type *col_index_ptr;
TrilinosScalar const *col_value_ptr;
- int n_columns;
+ int_type n_columns;
// If we don't elide zeros, the pointers
// are already available...
if (elide_zero_values == false)
{
- col_index_ptr = (int *)col_indices;
+ col_index_ptr = (int_type *)col_indices;
col_value_ptr = values;
n_columns = n_cols;
#ifdef DEBUG
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
Assert (numbers::is_finite(values[j]), ExcNumberNotFinite());
#endif
}
}
n_columns = 0;
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
{
const double value = values[j];
Assert (numbers::is_finite(value), ExcNumberNotFinite());
}
}
- Assert(n_columns <= (int)n_cols, ExcInternalError());
+ Assert(n_columns <= (int_type)n_cols, ExcInternalError());
- col_index_ptr = (int *)&column_indices[0];
+ col_index_ptr = (int_type *)&column_indices[0];
col_value_ptr = &column_values[0];
}
// can directly call the Epetra_CrsMatrix
// input function, which is much faster
// than the Epetra_FECrsMatrix function.
- if (row_partitioner().MyGID(static_cast<int>(row)) == true)
+ if (row_partitioner().MyGID(static_cast<int_type>(row)) == true)
{
ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row, n_columns,
const_cast<double *>(col_value_ptr),
// one element at a time).
compressed = false;
- ierr = matrix->SumIntoGlobalValues (1, (int *)&row, n_columns,
+ ierr = matrix->SumIntoGlobalValues (1, (int_type *)&row, n_columns,
col_index_ptr,
&col_value_ptr,
Epetra_FECrsMatrix::ROW_MAJOR);
std::cout << "Got error " << ierr << " in row " << row
<< " of proc " << row_partitioner().Comm().MyPID()
<< " when trying to add the columns:" << std::endl;
- for (int i=0; i<n_columns; ++i)
+ for (int_type i=0; i<n_columns; ++i)
std::cout << col_index_ptr[i] << " ";
std::cout << std::endl << std::endl;
std::cout << "Matrix row has the following indices:" << std::endl;
- int n_indices, *indices;
- trilinos_sparsity_pattern().ExtractMyRowView(row_partitioner().LID(static_cast<int>(row)),
+ int_type n_indices, *indices;
+ trilinos_sparsity_pattern().ExtractMyRowView(row_partitioner().LID(static_cast<int_type>(row)),
n_indices,
indices);
- for (int i=0; i<n_indices; ++i)
+ for (int_type i=0; i<n_indices; ++i)
std::cout << indices[i] << " ";
std::cout << std::endl << std::endl;
Assert (ierr <= 0,
// called frequently and do only involve
// a call to some Trilinos function.
inline
- unsigned int
+  SparseMatrix::size_type
SparseMatrix::m () const
{
return matrix -> NumGlobalRows();
inline
- unsigned int
+  SparseMatrix::size_type
SparseMatrix::n () const
{
return matrix -> NumGlobalCols();
inline
- unsigned int
+  SparseMatrix::size_type
SparseMatrix::local_size () const
{
return matrix -> NumMyRows();
inline
- std::pair<unsigned int, unsigned int>
+  std::pair<SparseMatrix::size_type, SparseMatrix::size_type>
SparseMatrix::local_range () const
{
- unsigned int begin, end;
+ size_type begin, end;
begin = matrix -> RowMap().MinMyGID();
end = matrix -> RowMap().MaxMyGID()+1;
inline
- unsigned int
+  SparseMatrix::size_type
SparseMatrix::n_nonzero_elements () const
{
return matrix->NumGlobalNonzeros();
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- AssertDimension (dst.local_size(), static_cast<unsigned int>(matrix->RangeMap().NumMyElements()));
- AssertDimension (src.local_size(), static_cast<unsigned int>(matrix->DomainMap().NumMyElements()));
+ AssertDimension (dst.local_size(), static_cast<size_type>(matrix->RangeMap().NumMyElements()));
+ AssertDimension (src.local_size(), static_cast<size_type>(matrix->DomainMap().NumMyElements()));
Epetra_Vector tril_dst (View, matrix->RangeMap(), dst.begin());
Epetra_Vector tril_src (View, matrix->DomainMap(),
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- AssertDimension (static_cast<unsigned int>(matrix->DomainMap().NumMyElements()),
- static_cast<unsigned int>(matrix->DomainMap().NumGlobalElements()));
- AssertDimension (dst.size(), static_cast<unsigned int>(matrix->RangeMap().NumMyElements()));
- AssertDimension (src.size(), static_cast<unsigned int>(matrix->DomainMap().NumMyElements()));
+ AssertDimension (static_cast<size_type>(matrix->DomainMap().NumMyElements()),
+ static_cast<size_type>(matrix->DomainMap().NumGlobalElements()));
+ AssertDimension (dst.size(), static_cast<size_type>(matrix->RangeMap().NumMyElements()));
+ AssertDimension (src.size(), static_cast<size_type>(matrix->DomainMap().NumMyElements()));
Epetra_Vector tril_dst (View, matrix->RangeMap(), dst.begin());
Epetra_Vector tril_src (View, matrix->DomainMap(),
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- AssertDimension (dst.local_size(), static_cast<unsigned int>(matrix->DomainMap().NumMyElements()));
- AssertDimension (src.local_size(), static_cast<unsigned int>(matrix->RangeMap().NumMyElements()));
+ AssertDimension (dst.local_size(), static_cast<size_type>(matrix->DomainMap().NumMyElements()));
+ AssertDimension (src.local_size(), static_cast<size_type>(matrix->RangeMap().NumMyElements()));
Epetra_Vector tril_dst (View, matrix->DomainMap(), dst.begin());
Epetra_Vector tril_src (View, matrix->RangeMap(),
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- AssertDimension (static_cast<unsigned int>(matrix->DomainMap().NumMyElements()),
- static_cast<unsigned int>(matrix->DomainMap().NumGlobalElements()));
- AssertDimension (dst.size(), static_cast<unsigned int>(matrix->DomainMap().NumMyElements()));
- AssertDimension (src.size(), static_cast<unsigned int>(matrix->RangeMap().NumMyElements()));
+ AssertDimension (static_cast<size_type>(matrix->DomainMap().NumMyElements()),
+ static_cast<size_type>(matrix->DomainMap().NumGlobalElements()));
+ AssertDimension (dst.size(), static_cast<size_type>(matrix->DomainMap().NumMyElements()));
+ AssertDimension (src.size(), static_cast<size_type>(matrix->RangeMap().NumMyElements()));
Epetra_Vector tril_dst (View, matrix->DomainMap(), dst.begin());
Epetra_Vector tril_src (View, matrix->RangeMap(),
class Accessor
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
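+ // note: int_type mirrors Epetra's global ordinal type, i.e. plain int
+ // unless 64-bit global indices are enabled, in which case long long.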
+
+
/**
* Constructor.
*/
Accessor (const SparsityPattern *sparsity_pattern,
- const unsigned int row,
- const unsigned int index);
+ const size_type row,
+ const size_type index);
/**
* Row number of the element
* represented by this object.
*/
- unsigned int row() const;
+ size_type row() const;
/**
* Index in row of the element
* represented by this object.
*/
- unsigned int index() const;
+ size_type index() const;
/**
* Column number of the element
* represented by this object.
*/
- unsigned int column() const;
+ size_type column() const;
/**
* Exception
* Exception
*/
DeclException3 (ExcAccessToNonlocalRow,
- int, int, int,
+ size_type, size_type, size_type,
<< "You tried to access row " << arg1
<< " of a distributed sparsity pattern, "
<< " but only rows " << arg2 << " through " << arg3
/**
* Current row number.
*/
- unsigned int a_row;
+ size_type a_row;
/**
* Current index in row.
*/
- unsigned int a_index;
+ size_type a_index;
/**
* Cache where we store the
* than one accessor can access
* this data if necessary.
*/
- std_cxx1x::shared_ptr<const std::vector<unsigned int> > colnum_cache;
+ std_cxx1x::shared_ptr<const std::vector<size_type> > colnum_cache;
/**
* Discard the old row caches
* the index within it.
*/
Iterator (const SparsityPattern *sparsity_pattern,
- const unsigned int row,
- const unsigned int index);
+ const size_type row,
+ const size_type index);
/**
* Prefix increment.
* Exception
*/
DeclException2 (ExcInvalidIndexWithinRow,
- int, int,
+ size_type, size_type,
<< "Attempt to access element " << arg2
<< " of row " << arg1
<< " which doesn't have that many elements.");
* will reduce the setup time of the
* sparsity pattern.
*/
- SparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int n_entries_per_row = 0);
+ SparsityPattern (const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row = 0);
/**
* Generate a sparsity pattern that is
* each row (an information usually
* not available, though).
*/
- SparsityPattern (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row);
+ SparsityPattern (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Copy constructor. Sets the calling
* pattern.
*/
void
- reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int n_entries_per_row = 0);
+ reinit (const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row = 0);
/**
* Initialize a sparsity pattern that
* each row.
*/
void
- reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row);
+ reinit (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Copy function. Sets the calling
* increases the performance when
* creating the sparsity pattern.
*/
- SparsityPattern (const Epetra_Map &parallel_partitioning,
- const unsigned int n_entries_per_row = 0);
+ SparsityPattern (const Epetra_Map &parallel_partitioning,
+ const size_type n_entries_per_row = 0);
/**
* Same as before, but now use the
* sparsity pattern is designed to
* describe.
*/
- SparsityPattern (const Epetra_Map &parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
+ SparsityPattern (const Epetra_Map &parallel_partitioning,
+ const std::vector<size_type> &n_entries_per_row);
/**
* This constructor is similar to the
*/
SparsityPattern (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const unsigned int n_entries_per_row = 0);
+ const size_type n_entries_per_row = 0);
/**
* This constructor is similar to the
* the number of entries in each row of
* the newly generated matrix.
*/
- SparsityPattern (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
+ SparsityPattern (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Reinitialization function for
* add() function.
*/
void
- reinit (const Epetra_Map &parallel_partitioning,
- const unsigned int n_entries_per_row = 0);
+ reinit (const Epetra_Map &parallel_partitioning,
+ const size_type n_entries_per_row = 0);
/**
* Same as before, but now use the
* designed to describe.
*/
void
- reinit (const Epetra_Map &parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
+ reinit (const Epetra_Map &parallel_partitioning,
+ const std::vector<size_type> &n_entries_per_row);
/**
* This reinit function is similar to
void
reinit (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const unsigned int n_entries_per_row = 0);
+ const size_type n_entries_per_row = 0);
/**
* This reinit function is similar to
* matrix.
*/
void
- reinit (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
+ reinit (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Reinit function. Takes one of the
* increases the performance when
* creating the sparsity pattern.
*/
- SparsityPattern (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_entries_per_row = 0);
+ SparsityPattern (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
/**
* Same as before, but now use the
*/
SparsityPattern (const IndexSet &parallel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
+ const std::vector<size_type> &n_entries_per_row);
/**
* This constructor is similar to the
* row is specified as the maximum
* number of entries argument.
*/
- SparsityPattern (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_entries_per_row = 0);
+ SparsityPattern (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
/**
* This constructor is similar to the
* the number of entries in each row of
* the newly generated matrix.
*/
- SparsityPattern (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
+ SparsityPattern (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Reinitialization function for
* add() function.
*/
void
- reinit (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_entries_per_row = 0);
+ reinit (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
/**
* Same as before, but now use the
* designed to describe.
*/
void
- reinit (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
+ reinit (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<size_type> &n_entries_per_row);
/**
* This reinit function is similar to
* <tt>n_entries_per_row</tt>.
*/
void
- reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_entries_per_row = 0);
+ reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
/**
* Same as before, but now using a
* sparsity pattern.
*/
void
- reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
+ reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<size_type> &n_entries_per_row);
/**
* Reinit function. Takes one of the
* entries per row on the current
* processor.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
* Return the number of rows in this
* sparsity pattern.
*/
- unsigned int n_rows () const;
+ size_type n_rows () const;
/**
* Return the number of columns in
* this sparsity pattern.
*/
- unsigned int n_cols () const;
+ size_type n_cols () const;
/**
* Return the local dimension of the
* exactly are stored locally,
* use local_range().
*/
- unsigned int local_size () const;
+ size_type local_size () const;
/**
* Return a pair of indices
* it will be a pair (i,i+n), where
* <tt>n=local_size()</tt>.
*/
- std::pair<unsigned int, unsigned int>
+ std::pair<size_type, size_type>
local_range () const;
/**
* in the local range or not,
* see also local_range().
*/
- bool in_local_range (const unsigned int index) const;
+ bool in_local_range (const size_type index) const;
/**
* Return the number of nonzero
* elements of this sparsity pattern.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Number of entries in a
* specific row.
*/
- unsigned int row_length (const unsigned int row) const;
+ size_type row_length (const size_type row) const;
/**
* Compute the bandwidth of the
* bandwidth a $n\times m$ matrix can
* have is $\max\{n-1,m-1\}$.
*/
- unsigned int bandwidth () const;
+ size_type bandwidth () const;
/**
* Return whether the object is
* sparsity pattern (i.e., it may be
* non-zero) or not.
*/
- bool exists (const unsigned int i,
- const unsigned int j) const;
+ bool exists (const size_type i,
+ const size_type j) const;
/**
* Determine an estimate for the
* Add the element (<i>i,j</i>) to
* the sparsity pattern.
*/
- void add (const unsigned int i,
- const unsigned int j);
+ void add (const size_type i,
+ const size_type j);
/**
* the sparsity pattern.
*/
template <typename ForwardIterator>
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
+ void add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
//@}
/**
* @name Access of underlying Trilinos data
* that the iterator may not be
* dereferencable in that case.
*/
- const_iterator begin (const unsigned int r) const;
+ const_iterator begin (const size_type r) const;
/**
* Final iterator of row
* the end iterator for the
* last row of a matrix.
*/
- const_iterator end (const unsigned int r) const;
+ const_iterator end (const size_type r) const;
//@}
/**
* Exception
*/
DeclException2 (ExcInvalidIndex,
- int, int,
+ size_type, size_type,
<< "The entry with index <" << arg1 << ',' << arg2
<< "> does not exist.");
* Exception
*/
DeclException4 (ExcAccessToNonLocalElement,
- int, int, int, int,
+ size_type, size_type, size_type, size_type,
<< "You tried to access element (" << arg1
<< "/" << arg2 << ")"
<< " of a distributed matrix, but only rows "
* Exception
*/
DeclException2 (ExcAccessToNonPresentElement,
- int, int,
+ size_type, size_type,
<< "You tried to access element (" << arg1
<< "/" << arg2 << ")"
<< " of a sparse matrix, but it appears to not"
inline
Accessor::Accessor (const SparsityPattern *sp,
- const unsigned int row,
- const unsigned int index)
+ const size_type row,
+ const size_type index)
:
sparsity_pattern(const_cast<SparsityPattern *>(sp)),
a_row(row),
inline
- unsigned int
+ Accessor::size_type
Accessor::row() const
{
Assert (a_row < sparsity_pattern->n_rows(), ExcBeyondEndOfSparsityPattern());
inline
- unsigned int
+ Accessor::size_type
Accessor::column() const
{
Assert (a_row < sparsity_pattern->n_rows(), ExcBeyondEndOfSparsityPattern());
inline
- unsigned int
+ Accessor::size_type
Accessor::index() const
{
Assert (a_row < sparsity_pattern->n_rows(), ExcBeyondEndOfSparsityPattern());
inline
Iterator::Iterator(const SparsityPattern *sp,
- const unsigned int row,
- const unsigned int index)
+ const size_type row,
+ const size_type index)
:
accessor(sp, row, index)
{}
inline
SparsityPattern::const_iterator
- SparsityPattern::begin(const unsigned int r) const
+ SparsityPattern::begin(const size_type r) const
{
Assert (r < n_rows(), ExcIndexRange(r, 0, n_rows()));
if (row_length(r) > 0)
inline
SparsityPattern::const_iterator
- SparsityPattern::end(const unsigned int r) const
+ SparsityPattern::end(const size_type r) const
{
Assert (r < n_rows(), ExcIndexRange(r, 0, n_rows()));
// place the iterator on the first entry
// past this line, or at the end of the
// matrix
- for (unsigned int i=r+1; i<n_rows(); ++i)
+ for (size_type i=r+1; i<n_rows(); ++i)
if (row_length(i) > 0)
return const_iterator(this, i, 0);
inline
bool
- SparsityPattern::in_local_range (const unsigned int index) const
+ SparsityPattern::in_local_range (const size_type index) const
{
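+ // note: this assumes each process owns a contiguous range of rows, so
+ // locality reduces to a check against [MinMyGID, MaxMyGID]; with 64-bit
+ // global indices Epetra exposes this range via MinMyGID64()/MaxMyGID64().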
- int begin, end;
+ int_type begin, end;
begin = graph->RowMap().MinMyGID();
end = graph->RowMap().MaxMyGID()+1;
- return ((index >= static_cast<unsigned int>(begin)) &&
- (index < static_cast<unsigned int>(end)));
+ return ((index >= static_cast<size_type>(begin)) &&
+ (index < static_cast<size_type>(end)));
}
inline
void
- SparsityPattern::add (const unsigned int i,
- const unsigned int j)
+ SparsityPattern::add (const size_type i,
+ const size_type j)
{
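+ // forward to add_entries() with a one-element index range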
add_entries (i, &j, &j+1);
}
template <typename ForwardIterator>
inline
void
- SparsityPattern::add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool /*indices_are_sorted*/)
+ SparsityPattern::add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool /*indices_are_sorted*/)
{
if (begin == end)
return;
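+ // note: the pointer casts below reinterpret the caller's index storage
+ // as int_type; this is only safe when size_type and int_type have the
+ // same width (true for a long long int_type, not when int_type is int).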
- int *col_index_ptr = (int *)(&*begin);
- const int n_cols = static_cast<int>(end - begin);
+ int_type *col_index_ptr = (int_type *)(&*begin);
+ const int_type n_cols = static_cast<int_type>(end - begin);
compressed = false;
- const int ierr = graph->InsertGlobalIndices (1, (int *)&row,
+ const int ierr = graph->InsertGlobalIndices (1, (int_type *)&row,
n_cols, col_index_ptr);
AssertThrow (ierr >= 0, ExcTrilinosError(ierr));
inline
- SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator,
- const unsigned int n_entries_per_row)
+ SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator,
+ const size_type n_entries_per_row)
:
compressed (false)
{
inline
SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
:
compressed (false)
{
inline
- SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const unsigned int n_entries_per_row)
+ SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const size_type n_entries_per_row)
:
compressed (false)
{
SparsityPattern (const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
:
compressed (false)
{
inline
void
- SparsityPattern::reinit (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator,
- const unsigned int n_entries_per_row)
+ SparsityPattern::reinit (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator,
+ const size_type n_entries_per_row)
{
Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
false);
inline
void SparsityPattern::reinit (const IndexSet &parallel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
{
Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
false);
inline
- void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const unsigned int n_entries_per_row)
+ void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const size_type n_entries_per_row)
{
Epetra_Map row_map =
row_parallel_partitioning.make_trilinos_map (communicator, false);
SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
{
Epetra_Map row_map =
row_parallel_partitioning.make_trilinos_map (communicator, false);
* @verbatim
* TrilinosWrappers::Vector vector;
* // do some write operations on the vector
- * for (unsigned int i=0; i<vector->size(); ++i)
+ * for (size_type i=0; i<vector->size(); ++i)
* vector(i) = i;
*
* // do some additions to vector elements, but
* // only for some elements
- * for (unsigned int i=0; i<vector->size(); ++i)
+ * for (size_type i=0; i<vector->size(); ++i)
* if (some_condition(i) == true)
* vector(i) += 1;
*
class Vector : public VectorBase
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
+
+
/**
* @name Basic constructors and initalization.
*/
has_ghosts = vector->Map().UniqueGIDs()==false;
- const int size = parallel_partitioner.NumMyElements();
+ const int_type size = parallel_partitioner.NumMyElements();
// Need to copy out values, since the
// deal.II might not use doubles, so
// that a direct access is not possible.
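+ // GID(i) maps each local index back to its global counterpart (Epetra
+ // provides GID64(i) when 64-bit global indices are enabled).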
- for (int i=0; i<size; ++i)
+ for (int_type i=0; i<size; ++i)
(*vector)[0][i] = v(parallel_partitioner.GID(i));
}
class Vector : public VectorBase
{
public:
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type for container size.
+ */
+ typedef unsigned int size_type;
+
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type for container size.
+ */
+ typedef uint64_t size_type;
+
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
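+ // note: unlike the std::size_t choice elsewhere, size_type here follows
+ // the width of the global index type so every global element index fits.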
+
/**
* Default constructor that
* generates an empty (zero size)
* input the number of elements
* in the vector.
*/
- Vector (const unsigned int n);
+ Vector (const size_type n);
/**
* This constructor takes as
* the vector to the size
* specified by <tt>n</tt>.
*/
- void reinit (const unsigned int n,
- const bool fast = false);
+ void reinit (const size_type n,
+ const bool fast = false);
/**
* Initialization with an
template <typename number>
Vector::Vector (const dealii::Vector<number> &v)
{
- Epetra_LocalMap map ((int)v.size(), 0, Utilities::Trilinos::comm_self());
+ Epetra_LocalMap map ((int_type)v.size(), 0, Utilities::Trilinos::comm_self());
vector.reset (new Epetra_FEVector(map));
*this = v;
}
{
vector.reset();
- Epetra_LocalMap map ((int)v.size(), 0,
+ Epetra_LocalMap map ((int_type)v.size(), 0,
Utilities::Trilinos::comm_self());
vector.reset (new Epetra_FEVector(map));
}
const Epetra_Map &map = vector_partitioner();
- const int size = map.NumMyElements();
+ const int_type size = map.NumMyElements();
Assert (map.MaxLID() == size-1,
ExcDimensionMismatch(map.MaxLID(), size-1));
// Need to copy out values, since the
// deal.II might not use doubles, so
// that a direct access is not possible.
- for (int i=0; i<size; ++i)
+ for (int_type i=0; i<size; ++i)
(*vector)[0][i] = v(i);
return *this;
* the actual vector class to
* create it.
*/
- VectorReference (VectorBase &vector,
- const unsigned int index);
+ VectorReference (VectorBase &vector,
+ const size_type index);
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
+
/**
* This looks like a copy
* operator, but does something
* Exception
*/
DeclException3 (ExcAccessToNonLocalElement,
- int, int, int,
+ size_type, size_type, size_type,
<< "You tried to access element " << arg1
<< " of a distributed vector, but only elements "
<< arg2 << " through " << arg3
* Index of the referenced element
* of the vector.
*/
- const unsigned int index;
+ const size_type index;
/**
* Make the vector class a
typedef internal::VectorReference reference;
typedef const internal::VectorReference const_reference;
+#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+ /**
+ * Declare type of integer.
+ */
+ typedef int int_type;
+#else
+ /**
+ * Declare type of integer.
+ */
+ typedef long long int_type;
+#endif
+
/**
* @name 1: Basic Object-handling
*/
* Return the global dimension of
* the vector.
*/
- unsigned int size () const;
+ size_type size () const;
/**
* Return the local dimension of
* elements, they are included in
* this number.
*/
- unsigned int local_size () const;
+ size_type local_size () const;
/**
* Return a pair of indices
* be a pair (i,i+n), where
* <tt>n=local_size()</tt>.
*/
- std::pair<unsigned int, unsigned int> local_range () const;
+ std::pair<size_type, size_type> local_range () const;
/**
* Return whether @p index is in
* the local range or not, see
* also local_range().
*/
- bool in_local_range (const unsigned int index) const;
+ bool in_local_range (const size_type index) const;
/**
* Return if the vector contains ghost
* element, both read and write.
*/
reference
- operator () (const unsigned int index);
+ operator () (const size_type index);
/**
* Provide read-only access to an
* the <code>el()</code> command.
*/
TrilinosScalar
- operator () (const unsigned int index) const;
+ operator () (const size_type index) const;
/**
* Provide access to a given
* Exactly the same as operator().
*/
reference
- operator [] (const unsigned int index);
+ operator [] (const size_type index);
/**
* Provide read-only access to an
* Exactly the same as operator().
*/
TrilinosScalar
- operator [] (const unsigned int index) const;
+ operator [] (const size_type index) const;
/**
* Return the value of the vector
* elements sits on another
* process.
*/
- TrilinosScalar el (const unsigned int index) const;
+ TrilinosScalar el (const size_type index) const;
/**
* A collective set operation:
* argument, the corresponding
* values in the second.
*/
- void set (const std::vector<unsigned int> &indices,
+ void set (const std::vector<size_type> &indices,
const std::vector<TrilinosScalar> &values);
/**
* takes a deal.II vector of
* values.
*/
- void set (const std::vector<unsigned int> &indices,
+ void set (const std::vector<size_type> &indices,
const ::dealii::Vector<TrilinosScalar> &values);
//@}
* the number of elements to be
* set.
*/
- void set (const unsigned int n_elements,
- const unsigned int *indices,
+ void set (const size_type n_elements,
+ const size_type *indices,
const TrilinosScalar *values);
/**
* components specified by @p
* indices.
*/
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const std::vector<TrilinosScalar> &values);
/**
* takes a deal.II vector of
* values.
*/
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const ::dealii::Vector<TrilinosScalar> &values);
/**
* other two <tt>add()</tt>
* functions above.
*/
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
const TrilinosScalar *values);
/**
* Exception
*/
DeclException3 (ExcAccessToNonlocalElement,
- int, int, int,
+ size_type, size_type, size_type,
<< "You tried to access element " << arg1
<< " of a distributed vector, but only entries "
<< arg2 << " through " << arg3
namespace internal
{
inline
- VectorReference::VectorReference (VectorBase &vector,
- const unsigned int index)
+ VectorReference::VectorReference (VectorBase &vector,
+ const size_type index)
:
vector (vector),
index (index)
inline
bool
- VectorBase::in_local_range (const unsigned int index) const
+ VectorBase::in_local_range (const size_type index) const
{
- std::pair<unsigned int, unsigned int> range = local_range();
+ std::pair<size_type, size_type> range = local_range();
return ((index >= range.first) && (index < range.second));
}
inline
internal::VectorReference
- VectorBase::operator () (const unsigned int index)
+ VectorBase::operator () (const size_type index)
{
return internal::VectorReference (*this, index);
}
inline
internal::VectorReference
- VectorBase::operator [] (const unsigned int index)
+ VectorBase::operator [] (const size_type index)
{
return operator() (index);
}
inline
TrilinosScalar
- VectorBase::operator [] (const unsigned int index) const
+ VectorBase::operator [] (const size_type index) const
{
return operator() (index);
}
inline
void
- VectorBase::set (const std::vector<unsigned int> &indices,
+ VectorBase::set (const std::vector<size_type> &indices,
const std::vector<TrilinosScalar> &values)
{
// if we have ghost values, do not allow
inline
void
- VectorBase::set (const std::vector<unsigned int> &indices,
+ VectorBase::set (const std::vector<size_type> &indices,
const ::dealii::Vector<TrilinosScalar> &values)
{
// if we have ghost values, do not allow
inline
void
- VectorBase::set (const unsigned int n_elements,
- const unsigned int *indices,
+ VectorBase::set (const size_type n_elements,
+ const size_type *indices,
const TrilinosScalar *values)
{
// if we have ghost values, do not allow
if (last_action != Insert)
last_action = Insert;
- for (unsigned int i=0; i<n_elements; ++i)
+ for (size_type i=0; i<n_elements; ++i)
{
- const unsigned int row = indices[i];
- const int local_row = vector->Map().LID(static_cast<int>(row));
+ const size_type row = indices[i];
+ const int local_row = vector->Map().LID(static_cast<int_type>(row));
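+ // an LID of -1 signals a global index not owned by this process; in
+ // that case fall back to setting the value through global assembly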
if (local_row == -1)
{
const int ierr = vector->ReplaceGlobalValues (1,
- (const int *)(&row),
+ (const int_type *)(&row),
&values[i]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
compressed = false;
inline
void
- VectorBase::add (const std::vector<unsigned int> &indices,
+ VectorBase::add (const std::vector<size_type> &indices,
const std::vector<TrilinosScalar> &values)
{
// if we have ghost values, do not allow
inline
void
- VectorBase::add (const std::vector<unsigned int> &indices,
+ VectorBase::add (const std::vector<size_type> &indices,
const ::dealii::Vector<TrilinosScalar> &values)
{
// if we have ghost values, do not allow
inline
void
- VectorBase::add (const unsigned int n_elements,
- const unsigned int *indices,
+ VectorBase::add (const size_type n_elements,
+ const size_type *indices,
const TrilinosScalar *values)
{
// if we have ghost values, do not allow
last_action = Add;
}
- for (unsigned int i=0; i<n_elements; ++i)
+ for (size_type i=0; i<n_elements; ++i)
{
- const unsigned int row = indices[i];
- const int local_row = vector->Map().LID(static_cast<int>(row));
+ const size_type row = indices[i];
+ const int local_row = vector->Map().LID(static_cast<int_type>(row));
if (local_row == -1)
{
const int ierr = vector->SumIntoGlobalValues (1,
- (const int *)(&row),
+ (const int_type *)(&row),
&values[i]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
compressed = false;
inline
- unsigned int
+ VectorBase::size_type
VectorBase::size () const
{
- return (unsigned int) (vector->Map().MaxAllGID() + 1 -
+ return (size_type) (vector->Map().MaxAllGID() + 1 -
vector->Map().MinAllGID());
}
inline
- unsigned int
+ VectorBase::size_type
VectorBase::local_size () const
{
- return (unsigned int) vector->Map().NumMyElements();
+ return (size_type) vector->Map().NumMyElements();
}
inline
- std::pair<unsigned int, unsigned int>
+ std::pair<VectorBase::size_type, VectorBase::size_type>
VectorBase::local_range () const
{
- int begin, end;
+ int_type begin, end;
begin = vector->Map().MinMyGID();
end = vector->Map().MaxMyGID()+1;
return std::make_pair (begin, end);
TrilinosScalar norm = 0;
TrilinosScalar sum=0;
- const unsigned int n_local = local_size();
+ const size_type n_local = local_size();
// loop over all the elements because
// Trilinos does not support lp norms
- for (unsigned int i=0; i<n_local; i++)
+ for (size_type i=0; i<n_local; i++)
sum += std::pow(std::fabs((*vector)[0][i]), p);
norm = std::pow(sum, static_cast<TrilinosScalar>(1./p));
Assert (!has_ghost_elements(), ExcGhostsPresent());
Assert (numbers::is_finite(s), ExcNumberNotFinite());
- unsigned int n_local = local_size();
- for (unsigned int i=0; i<n_local; i++)
+ size_type n_local = local_size();
+ for (size_type i=0; i<n_local; i++)
(*vector)[0][i] += s;
}
* i.e. the vector is replaced by
* one of length zero.
*/
- explicit Vector (const unsigned int n);
+ explicit Vector (const size_type n);
/**
* Initialize the vector with a
* classes to handle memory
* separately.
*/
- virtual void reinit (const unsigned int N,
- const bool fast=false);
+ virtual void reinit (const size_type N,
+ const bool fast=false);
/**
* Change the dimension to that of the
/**
* Return dimension of the vector.
*/
- unsigned int size () const;
+ size_type size () const;
/**
* Return whether the vector contains only
* Access the value of the @p ith
* component.
*/
- Number operator() (const unsigned int i) const;
+ Number operator() (const size_type i) const;
/**
* Access the @p ith component
* as a writeable reference.
*/
- Number &operator() (const unsigned int i);
+ Number &operator() (const size_type i);
/**
* Access the value of the @p ith
*
* Exactly the same as operator().
*/
- Number operator[] (const unsigned int i) const;
+ Number operator[] (const size_type i) const;
/**
* Access the @p ith component
*
* Exactly the same as operator().
*/
- Number &operator[] (const unsigned int i);
+ Number &operator[] (const size_type i);
//@}
* indices.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const std::vector<OtherNumber> &values);
/**
* values.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const Vector<OtherNumber> &values);
+ void add (const std::vector<size_type> &indices,
+ const Vector<OtherNumber> &values);
/**
* Take an address where
* functions above.
*/
template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
const OtherNumber *values);
/**
* memory consumption (in bytes)
* of this object.
*/
- std::size_t memory_consumption () const;
+ size_type memory_consumption () const;
//@}
/**
* vector. Get this number by
* calling <tt>size()</tt>.
*/
- unsigned int vec_size;
+ size_type vec_size;
/**
* Amount of memory actually
* memory when the number of
* needed elements is reduced.
*/
- unsigned int max_vec_size;
+ size_type max_vec_size;
/**
* Pointer to the array of
template <typename Number>
inline
-Vector<Number>::Vector (const unsigned int n)
+Vector<Number>::Vector (const size_type n)
:
vec_size(0),
max_vec_size(0),
template <typename Number>
inline
-void Vector<Number>::reinit (const unsigned int n, const bool fast)
+void Vector<Number>::reinit (const size_type n, const bool fast)
{
if (n==0)
{
template <typename Number>
inline
-unsigned int Vector<Number>::size () const
+typename Vector<Number>::size_type Vector<Number>::size () const
{
return vec_size;
}
template <typename Number>
inline
-Number Vector<Number>::operator() (const unsigned int i) const
+Number Vector<Number>::operator() (const size_type i) const
{
Assert (i<vec_size, ExcIndexRange(i,0,vec_size));
return val[i];
template <typename Number>
inline
-Number &Vector<Number>::operator() (const unsigned int i)
+Number &Vector<Number>::operator() (const size_type i)
{
Assert (i<vec_size, ExcIndexRange(i,0,vec_size));
return val[i];
template <typename Number>
inline
-Number Vector<Number>::operator[] (const unsigned int i) const
+Number Vector<Number>::operator[] (const size_type i) const
{
return operator()(i);
}
template <typename Number>
inline
-Number &Vector<Number>::operator[] (const unsigned int i)
+Number &Vector<Number>::operator[] (const size_type i)
{
return operator()(i);
}
template <typename OtherNumber>
inline
void
-Vector<Number>::add (const std::vector<unsigned int> &indices,
+Vector<Number>::add (const std::vector<size_type> &indices,
const std::vector<OtherNumber> &values)
{
Assert (indices.size() == values.size(),
template <typename OtherNumber>
inline
void
-Vector<Number>::add (const std::vector<unsigned int> &indices,
- const Vector<OtherNumber> &values)
+Vector<Number>::add (const std::vector<size_type> &indices,
+ const Vector<OtherNumber> &values)
{
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(), values.size()));
template <typename OtherNumber>
inline
void
-Vector<Number>::add (const unsigned int n_indices,
- const unsigned int *indices,
+Vector<Number>::add (const size_type n_indices,
+ const size_type *indices,
const OtherNumber *values)
{
- for (unsigned int i=0; i<n_indices; ++i)
+ for (size_type i=0; i<n_indices; ++i)
{
Assert (indices[i] < vec_size, ExcIndexRange(indices[i],0,vec_size));
Assert (numbers::is_finite(values[i]),
{
Assert (vec_size!=0, ExcEmptyObject());
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
if (val[i] != Number(0))
return false;
return true;
{
Assert (vec_size!=0, ExcEmptyObject());
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
if ( ! internal::is_non_negative (val[i]))
return false;
{
template<typename T>
void set_subrange (const T s,
- const unsigned int begin,
- const unsigned int end,
+ const size_type begin,
+ const size_type end,
dealii::Vector<T> &dst)
{
if (s == T())
}
template<typename T>
- void copy_subrange (const unsigned int begin,
- const unsigned int end,
+ void copy_subrange (const size_type begin,
+ const size_type end,
const dealii::Vector<T> &src,
dealii::Vector<T> &dst)
{
}
template<typename T, typename U>
- void copy_subrange (const unsigned int begin,
- const unsigned int end,
+ void copy_subrange (const size_type begin,
+ const size_type end,
const dealii::Vector<T> &src,
dealii::Vector<U> &dst)
{
}
template<typename T, typename U>
- void copy_subrange_wrap (const unsigned int begin,
- const unsigned int end,
+ void copy_subrange_wrap (const size_type begin,
+ const size_type end,
const dealii::Vector<T> &src,
dealii::Vector<U> &dst)
{
void copy_vector (const dealii::Vector<T> &src,
dealii::Vector<U> &dst)
{
- const unsigned int vec_size = src.size();
- const unsigned int dst_size = dst.size();
+ const size_type vec_size = src.size();
+ const size_type dst_size = dst.size();
if (dst_size != vec_size)
dst.reinit (vec_size, true);
if (vec_size>internal::Vector::minimum_parallel_grain_size)
const Number *X,
const Number2 *Y,
const ResultType power,
- const unsigned int vec_size,
+ const size_type vec_size,
ResultType &result)
{
if (vec_size <= 4096)
// order to obtain known loop bounds for most of the work.
const Number *X_original = X;
ResultType outer_results [128];
- unsigned int n_chunks = vec_size / 32;
- const unsigned int remainder = vec_size % 32;
+ size_type n_chunks = vec_size / 32;
+ const size_type remainder = vec_size % 32;
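+ // since vec_size <= 4096 in this branch, n_chunks = vec_size/32 is at
+ // most 128, so the fixed-size outer_results array cannot overflow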
Assert (remainder == 0 || n_chunks < 128, ExcInternalError());
- for (unsigned int i=0; i<n_chunks; ++i)
+ for (size_type i=0; i<n_chunks; ++i)
{
ResultType r0 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r0 += op(X, Y, power);
ResultType r1 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r1 += op(X, Y, power);
r0 += r1;
r1 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r1 += op(X, Y, power);
ResultType r2 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r2 += op(X, Y, power);
r1 += r2;
r0 += r1;
// fall-through to work on these values.
if (remainder > 0)
{
- const unsigned int inner_chunks = remainder / 8;
+ const size_type inner_chunks = remainder / 8;
Assert (inner_chunks <= 3, ExcInternalError());
- const unsigned int remainder_inner = remainder % 8;
+ const size_type remainder_inner = remainder % 8;
ResultType r0 = ResultType(), r1 = ResultType(),
r2 = ResultType();
switch (inner_chunks)
{
case 3:
r2 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r2 += op(X, Y, power);
// no break
case 2:
r1 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r1 += op(X, Y, power);
r1 += r2;
// no break
case 1:
r2 = op(X, Y, power);
- for (unsigned int j=1; j<8; ++j)
+ for (size_type j=1; j<8; ++j)
r2 += op(X, Y, power);
// no break
default:
- for (unsigned int j=0; j<remainder_inner; ++j)
+ for (size_type j=0; j<remainder_inner; ++j)
r0 += op(X, Y, power);
r0 += r2;
r0 += r1;
{
if (n_chunks % 2 == 1)
outer_results[n_chunks++] = ResultType();
- for (unsigned int i=0; i<n_chunks; i+=2)
+ for (size_type i=0; i<n_chunks; i+=2)
outer_results[i/2] = outer_results[i] + outer_results[i+1];
n_chunks /= 2;
}
// split the vector into smaller pieces to be
// worked on recursively and create tasks for
// them. Make pieces divisible by 1024.
- const unsigned int new_size = (vec_size / 4096) * 1024;
+ const size_type new_size = (vec_size / 4096) * 1024;
ResultType r0, r1, r2, r3;
Threads::TaskGroup<> task_group;
task_group += Threads::new_task(&accumulate<Operation,Number,Number2,
// split vector into four pieces and work on
// the pieces recursively. Make pieces (except last)
// divisible by 1024.
- const unsigned int new_size = (vec_size / 4096) * 1024;
+ const size_type new_size = (vec_size / 4096) * 1024;
ResultType r0, r1, r2, r3;
accumulate (op, X, Y, power, new_size, r0);
accumulate (op, X+new_size, Y+new_size, power, new_size, r1);
{
real_type scale = 0.;
real_type sum = 1.;
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
{
if (val[i] != Number())
{
{
real_type scale = 0.;
real_type sum = 1.;
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
{
if (val[i] != Number())
{
real_type max = 0.;
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
max = std::max (numbers::NumberTraits<Number>::abs(val[i]), max);
return max;
(boost::lambda::_1 - boost::lambda::_2),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] -= v.val[i];
return *this;
(boost::lambda::_1 + v),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] += v;
}
(boost::lambda::_1 + boost::lambda::_2),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] += v.val[i];
}
(boost::lambda::_1 + a*boost::lambda::_2 + b*boost::lambda::_3),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] += a * v.val[i] + b * w.val[i];
}
(x*boost::lambda::_1 + boost::lambda::_2),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = x * val[i] + v.val[i];
}
(x*boost::lambda::_1 + a*boost::lambda::_2 + b*boost::lambda::_3),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = x*val[i] + a * v.val[i] + b * w.val[i];
}
(boost::lambda::_1*boost::lambda::_2),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] *= s.val[i];
}
Assert (vec_size!=0, ExcEmptyObject());
Assert (vec_size == s.vec_size, ExcDimensionMismatch(vec_size, s.vec_size));
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] *= s.val[i];
}
(a*boost::lambda::_1),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = a * u.val[i];
}
// because
// operator*(complex<float>,complex<double>)
// is not defined by default
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = a * Number(u.val[i]);
}
(a*boost::lambda::_1 + b*boost::lambda::_2),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = a * u.val[i] + b * v.val[i];
}
(a*boost::lambda::_1 + b*boost::lambda::_2 + c*boost::lambda::_3),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = a * u.val[i] + b * v.val[i] + c * w.val[i];
}
(boost::lambda::_1 / boost::lambda::_2),
internal::Vector::minimum_parallel_grain_size);
else if (vec_size > 0)
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
val[i] = a.val[i]/b.val[i];
}
if (v.size() != vec_size)
reinit (v.size(), true);
- unsigned int this_index = 0;
- for (unsigned int b=0; b<v.n_blocks(); ++b)
- for (unsigned int i=0; i<v.block(b).size(); ++i, ++this_index)
+ size_type this_index = 0;
+ for (size_type b=0; b<v.n_blocks(); ++b)
+ for (size_type i=0; i<v.block(b).size(); ++i, ++this_index)
val[this_index] = v.block(b)(i);
return *this;
// because
// operator==(complex<float>,complex<double>)
// is not defined by default
- for (unsigned int i=0; i<vec_size; ++i)
+ for (size_type i=0; i<vec_size; ++i)
if (val[i] != Number(v.val[i]))
return false;
{
Assert (vec_size!=0, ExcEmptyObject());
- for (unsigned int j=0; j<size(); ++j)
+ for (size_type j=0; j<size(); ++j)
internal::print (val[j], format);
std::printf ("\n");
}
out.setf (std::ios::fixed, std::ios::floatfield);
if (across)
- for (unsigned int i=0; i<size(); ++i)
+ for (size_type i=0; i<size(); ++i)
out << val[i] << ' ';
else
- for (unsigned int i=0; i<size(); ++i)
+ for (size_type i=0; i<size(); ++i)
out << val[i] << std::endl;
out << std::endl;
Assert (vec_size!=0, ExcEmptyObject());
if (across)
- for (unsigned int i=0; i<size(); ++i)
+ for (size_type i=0; i<size(); ++i)
out << std::setw(width) << val[i] << ' ';
else
- for (unsigned int i=0; i<size(); ++i)
+ for (size_type i=0; i<size(); ++i)
out << val[i] << std::endl;
out << std::endl;
}
// some resources that lead to
// problems in a multithreaded
// environment
- const unsigned int sz = size();
+ const size_type sz = size();
- char buf[16];
- std::sprintf(buf, "%d", sz);
+ char buf[22];
+ std::sprintf(buf, "%llu", static_cast<unsigned long long>(sz));
{
AssertThrow (in, ExcIO());
- unsigned int sz;
+ size_type sz;
char buf[16];
template <typename Number>
-std::size_t
+typename Vector<Number>::size_type
Vector<Number>::memory_consumption () const
{
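+ // the object itself plus the heap array of max_vec_size Number values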
return sizeof(*this) + (max_vec_size * sizeof(Number));
class GrowingVectorMemory : public VectorMemory<VECTOR>
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Constructor. The argument
* allows to preallocate a
* certain number of vectors. The
* default is not to do this.
*/
- GrowingVectorMemory (const unsigned int initial_size = 0,
+ GrowingVectorMemory (const size_type initial_size = 0,
const bool log_statistics = false);
/**
* nothing after first
* initialization
*/
- void initialize(const unsigned int size);
+ void initialize(const size_type size);
/**
* Pointer to the storage
* object
* output at the end of an
* object's lifetime.
*/
- unsigned int total_alloc;
+ size_type total_alloc;
/**
* Number of vectors currently
* allocated in this object; used
* for detecting memory leaks.
*/
- unsigned int current_alloc;
+ size_type current_alloc;
/**
* A flag controlling the logging
class VectorView : public Vector<Number>
{
public:
+
+ /**
+ * Declare type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Read write constructor. Takes the size
* of the vector, just like the standard
* from the location of the pointer @p
* ptr.
*/
- VectorView(const unsigned int new_size, Number *ptr);
+ VectorView(const size_type new_size, Number *ptr);
/**
* The constant constructor is the same
* construct it as a non const object or
* attempt to write on it.
*/
- VectorView(const unsigned int new_size, const Number *ptr);
+ VectorView(const size_type new_size, const Number *ptr);
/**
* This desctructor will only reset the
* call this reinit function if you
* really know what you are doing.
*/
- virtual void reinit (const unsigned int N,
+ virtual void reinit (const size_type N,
const bool fast=false);
/** This reinit function is
new object with the given
size, starting from the
pointer ptr. */
- void reinit(const unsigned int N, Number *ptr);
+ void reinit(const size_type N, Number *ptr);
/** This reinit function is
equivalent to constructing a
pointer ptr. The same
considerations made for the
constructor apply here. */
- void reinit(const unsigned int N, const Number *ptr);
+ void reinit(const size_type N, const Number *ptr);
/**
* This function is here to prevent
template<typename Number>
inline
-VectorView<Number>::VectorView(const unsigned int new_size, Number *ptr)
+VectorView<Number>::VectorView(const size_type new_size, Number *ptr)
{
this->vec_size = new_size;
this->max_vec_size = new_size;
template<typename Number>
inline
-VectorView<Number>::VectorView(const unsigned int new_size, const Number *ptr)
+VectorView<Number>::VectorView(const size_type new_size, const Number *ptr)
{
this->vec_size = new_size;
this->max_vec_size = new_size;
template<typename Number>
inline
-void VectorView<Number>::reinit(const unsigned int N, const bool fast)
+void VectorView<Number>::reinit(const size_type N, const bool fast)
{
this->vec_size = N;
this->max_vec_size = N;
template<typename Number>
inline
-void VectorView<Number>::reinit(const unsigned int new_size, Number *ptr)
+void VectorView<Number>::reinit(const size_type new_size, Number *ptr)
{
this->vec_size = new_size;
this->max_vec_size = new_size;
template<typename Number>
inline
-void VectorView<Number>::reinit(const unsigned int new_size, const Number *ptr)
+void VectorView<Number>::reinit(const size_type new_size, const Number *ptr)
{
this->vec_size = new_size;
this->max_vec_size = new_size;
template<typename number>
TridiagonalMatrix<number>::TridiagonalMatrix(
- unsigned int size,
+ size_type size,
bool symmetric)
:
diagonal(size, 0.),
template<typename number>
void
TridiagonalMatrix<number>::reinit(
- unsigned int size,
+ size_type size,
bool symmetric)
{
is_symmetric = symmetric;
// The actual loop skips the first
// and last row
- const unsigned int e=n()-1;
+ const size_type e=n()-1;
// Let iterators point to the first
// entry of each diagonal
typename std::vector<number>::const_iterator d = diagonal.begin();
++d;
++r;
// All rows with three entries
- for (unsigned int i=1; i<e; ++i,++d,++r,++l)
+ for (size_type i=1; i<e; ++i,++d,++r,++l)
w(i) += (*l) * v(i-1) + (*d) * v(i) + (*r) * v(i+1);
// Last row is special again
w(e) += (*l) * v(e-1) + (*d) * v(e);
w(0) = (*d) * v(0) + (*r) * v(1);
++d;
++r;
- for (unsigned int i=1; i<e; ++i,++d,++r,++l)
+ for (size_type i=1; i<e; ++i,++d,++r,++l)
w(i) = (*l) * v(i-1) + (*d) * v(i) + (*r) * v(i+1);
w(e) = (*l) * v(e-1) + (*d) * v(e);
}
if (n()==0) return;
- const unsigned int e=n()-1;
+ const size_type e=n()-1;
typename std::vector<number>::const_iterator d = diagonal.begin();
typename std::vector<number>::const_iterator r = right.begin();
typename std::vector<number>::const_iterator l = left.begin();
w(0) += (*d) * v(0) + (*l) * v(1);
++d;
++l;
- for (unsigned int i=1; i<e; ++i,++d,++r,++l)
+ for (size_type i=1; i<e; ++i,++d,++r,++l)
w(i) += (*l) * v(i+1) + (*d) * v(i) + (*r) * v(i-1);
w(e) += (*d) * v(e) + (*r) * v(e-1);
}
w(0) = (*d) * v(0) + (*l) * v(1);
++d;
++l;
- for (unsigned int i=1; i<e; ++i,++d,++r,++l)
+ for (size_type i=1; i<e; ++i,++d,++r,++l)
w(i) = (*l) * v(i+1) + (*d) * v(i) + (*r) * v(i-1);
w(e) = (*d) * v(e) + (*r) * v(e-1);
}
{
Assert(state == matrix, ExcState(state));
- const unsigned int e=n()-1;
+ const size_type e=n()-1;
typename std::vector<number>::const_iterator d = diagonal.begin();
typename std::vector<number>::const_iterator r = right.begin();
typename std::vector<number>::const_iterator l = left.begin();
number result = w(0) * ((*d) * v(0) + (*r) * v(1));
++d;
++r;
- for (unsigned int i=1; i<e; ++i,++d,++r,++l)
+ for (size_type i=1; i<e; ++i,++d,++r,++l)
result += w(i) * ((*l) * v(i-1)+ (*d) * v(i)+ (*r) * v(i+1));
result += w(e) * ((*l) * v(e-1) + (*d) * v(e));
return result;
template<typename number>
number
-TridiagonalMatrix<number>::eigenvalue(const unsigned int i) const
+TridiagonalMatrix<number>::eigenvalue(const size_type i) const
{
Assert(state == eigenvalues, ExcState(state));
Assert(i<n(), ExcIndexRange(i,0,n()));
void
BlockSparseMatrix::
- reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns)
+ reinit (const size_type n_block_rows,
+ const size_type n_block_columns)
{
// first delete previous content of
// the subobjects array
this->column_block_indices.reinit (n_block_columns, 0);
// and reinitialize the blocks
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
{
BlockType *p = new BlockType();
ExcDimensionMismatch (parallel_partitioning.size(),
block_sparsity_pattern.n_block_cols()));
- const unsigned int n_block_rows = parallel_partitioning.size();
+ const size_type n_block_rows = parallel_partitioning.size();
Assert (n_block_rows == block_sparsity_pattern.n_block_rows(),
ExcDimensionMismatch (n_block_rows,
// ... and then assign the correct
// data to the blocks.
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
{
this->sub_objects[r][c]->reinit (parallel_partitioning[r],
parallel_partitioning[c],
const MPI_Comm &communicator)
{
std::vector<Epetra_Map> epetra_maps;
- for (unsigned int i=0; i<block_sparsity_pattern.n_block_rows(); ++i)
+ for (size_type i=0; i<block_sparsity_pattern.n_block_rows(); ++i)
epetra_maps.push_back
(parallel_partitioning[i].make_trilinos_map(communicator, false));
reinit (const BlockSparsityType &block_sparsity_pattern)
{
std::vector<Epetra_Map> parallel_partitioning;
- for (unsigned int i=0; i<block_sparsity_pattern.n_block_rows(); ++i)
+ for (size_type i=0; i<block_sparsity_pattern.n_block_rows(); ++i)
parallel_partitioning.push_back
- (Epetra_Map(static_cast<int>(block_sparsity_pattern.block(i,0).n_rows()),
+ (Epetra_Map(static_cast<int_type>(block_sparsity_pattern.block(i,0).n_rows()),
0,
Utilities::Trilinos::comm_self()));
// ... and then assign the correct
// data to the blocks.
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
{
this->sub_objects[r][c]->reinit (block_sparsity_pattern.block(r,c));
}
const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
const double drop_tolerance)
{
- const unsigned int n_block_rows = parallel_partitioning.size();
+ const size_type n_block_rows = parallel_partitioning.size();
Assert (n_block_rows == dealii_block_sparse_matrix.n_block_rows(),
ExcDimensionMismatch (n_block_rows,
// ... and then assign the correct
// data to the blocks.
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
{
this->sub_objects[r][c]->reinit(parallel_partitioning[r],
parallel_partitioning[c],
#endif
std::vector<Epetra_Map> parallel_partitioning;
- for (unsigned int i=0; i<dealii_block_sparse_matrix.n_block_rows(); ++i)
- parallel_partitioning.push_back (Epetra_Map(static_cast<int>(dealii_block_sparse_matrix.block(i,0).m()),
+ for (size_type i=0; i<dealii_block_sparse_matrix.n_block_rows(); ++i)
+ parallel_partitioning.push_back (Epetra_Map(static_cast<int_type>(dealii_block_sparse_matrix.block(i,0).m()),
0,
trilinos_communicator));
- unsigned int
+ BlockSparseMatrix::size_type
BlockSparseMatrix::n_nonzero_elements () const
{
- unsigned int n_nonzero = 0;
- for (unsigned int rows = 0; rows<this->n_block_rows(); ++rows)
- for (unsigned int cols = 0; cols<this->n_block_cols(); ++cols)
+ size_type n_nonzero = 0;
+ for (size_type rows = 0; rows<this->n_block_rows(); ++rows)
+ for (size_type cols = 0; cols<this->n_block_cols(); ++cols)
n_nonzero += this->block(rows,cols).n_nonzero_elements();
return n_nonzero;
}
-
+
BlockVector &
BlockVector::operator = (const BlockVector &v)
{
if (this->n_blocks() != v.n_blocks())
reinit(v.n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.block(i);
collect_sizes();
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(),v.n_blocks()));
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.block(i);
return *this;
BlockVector::reinit (const std::vector<Epetra_Map> &input_maps,
const bool fast)
{
- const unsigned int no_blocks = input_maps.size();
- std::vector<unsigned int> block_sizes (no_blocks);
+ const size_type no_blocks = input_maps.size();
+ std::vector<size_type> block_sizes (no_blocks);
- for (unsigned int i=0; i<no_blocks; ++i)
+ for (size_type i=0; i<no_blocks; ++i)
{
block_sizes[i] = input_maps[i].NumGlobalElements();
}
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(input_maps[i], fast);
collect_sizes();
const MPI_Comm &communicator,
const bool fast)
{
- const unsigned int no_blocks = parallel_partitioning.size();
- std::vector<unsigned int> block_sizes (no_blocks);
+ const size_type no_blocks = parallel_partitioning.size();
+ std::vector<size_type> block_sizes (no_blocks);
- for (unsigned int i=0; i<no_blocks; ++i)
+ for (size_type i=0; i<no_blocks; ++i)
{
block_sizes[i] = parallel_partitioning[i].size();
}
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(parallel_partitioning[i], communicator, fast);
collect_sizes();
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(v.block(i), fast, false);
collect_sizes();
void
- BlockVector::reinit (const unsigned int num_blocks)
+ BlockVector::reinit (const size_type num_blocks)
{
- std::vector<unsigned int> block_sizes (num_blocks, 0);
+ std::vector<size_type> block_sizes (num_blocks, 0);
this->block_indices.reinit (block_sizes);
if (this->components.size() != this->n_blocks())
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
components[i].clear();
collect_sizes();
components.resize(v.n_blocks());
}
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
components[i].import_nonlocal_data_for_fe(m.block(i,i), v.block(i));
collect_sizes();
const bool scientific,
const bool across) const
{
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
{
if (across)
out << 'C' << i << ':';
BlockVector::reinit (const std::vector<Epetra_Map> &input_maps,
const bool fast)
{
- unsigned int no_blocks = input_maps.size();
- std::vector<unsigned int> block_sizes (no_blocks);
+ size_type no_blocks = input_maps.size();
+ std::vector<size_type> block_sizes (no_blocks);
- for (unsigned int i=0; i<no_blocks; ++i)
+ for (size_type i=0; i<no_blocks; ++i)
block_sizes[i] = input_maps[i].NumGlobalElements();
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(input_maps[i], fast);
collect_sizes();
const MPI_Comm &communicator,
const bool fast)
{
- unsigned int no_blocks = partitioning.size();
- std::vector<unsigned int> block_sizes (no_blocks);
+ size_type no_blocks = partitioning.size();
+ std::vector<size_type> block_sizes (no_blocks);
- for (unsigned int i=0; i<no_blocks; ++i)
+ for (size_type i=0; i<no_blocks; ++i)
block_sizes[i] = partitioning[i].size();
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(partitioning[i], communicator, fast);
collect_sizes();
void
- BlockVector::reinit (const std::vector<unsigned int> &block_sizes,
- const bool fast)
+ BlockVector::reinit (const std::vector<size_type> &block_sizes,
+ const bool fast)
{
this->block_indices.reinit (block_sizes);
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(block_sizes[i], fast);
collect_sizes();
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i] = v.block(i);
}
void
- BlockVector::reinit (const unsigned int num_blocks)
+ BlockVector::reinit (const size_type num_blocks)
{
- std::vector<unsigned int> block_sizes (num_blocks, 0);
+ std::vector<size_type> block_sizes (num_blocks, 0);
block_indices.reinit (block_sizes);
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
block(i).clear();
collect_sizes();
if (components.size() != n_blocks())
components.resize(n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].reinit(v.block(i), fast);
collect_sizes();
{
if (n_blocks() != v.n_blocks())
{
- std::vector<unsigned int> block_sizes (v.n_blocks(), 0);
+ std::vector<size_type> block_sizes (v.n_blocks(), 0);
block_indices.reinit (block_sizes);
if (components.size() != n_blocks())
components.resize(n_blocks());
}
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.block(i);
collect_sizes();
const bool scientific,
const bool across) const
{
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
{
if (across)
out << 'C' << i << ':';
PreconditionAMG:: initialize (const SparseMatrix &matrix,
const AdditionalData &additional_data)
{
- const unsigned int n_rows = matrix.m();
+ const size_type n_rows = matrix.m();
// Build the AMG preconditioner.
Teuchos::ParameterList parameter_list;
const Epetra_Map &domain_map = matrix.domain_partitioner();
- const unsigned int constant_modes_dimension =
+ const size_type constant_modes_dimension =
additional_data.constant_modes.size();
Epetra_MultiVector distributed_constant_modes (domain_map,
constant_modes_dimension);
{
const bool constant_modes_are_global =
additional_data.constant_modes[0].size() == n_rows;
- const unsigned int n_relevant_rows =
+ const size_type n_relevant_rows =
constant_modes_are_global ? n_rows : additional_data.constant_modes[0].size();
- const unsigned int my_size = domain_map.NumMyElements();
+ const size_type my_size = domain_map.NumMyElements();
if (constant_modes_are_global == false)
Assert (n_relevant_rows == my_size,
ExcDimensionMismatch(n_relevant_rows, my_size));
Assert (n_rows ==
- static_cast<unsigned int>(distributed_constant_modes.GlobalLength()),
+ static_cast<size_type>(distributed_constant_modes.GlobalLength()),
ExcDimensionMismatch(n_rows,
distributed_constant_modes.GlobalLength()));
// contiguous vector of
// doubles so that Trilinos
// can read from it.
- for (unsigned int d=0; d<constant_modes_dimension; ++d)
- for (unsigned int row=0; row<my_size; ++row)
+ for (size_type d=0; d<constant_modes_dimension; ++d)
+ for (size_type row=0; row<my_size; ++row)
{
- int global_row_id = constant_modes_are_global ? domain_map.GID(row) : row;
+ int_type global_row_id = constant_modes_are_global ? domain_map.GID(row) : row;
distributed_constant_modes[d][row] =
additional_data.constant_modes[d][global_row_id];
}
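The loop above flattens the user-supplied constant modes into the per-column storage Trilinos expects, translating local rows to global rows when the modes are given globally. A small sketch of that copy, with plain vectors standing in for Epetra_MultiVector (names are illustrative):

    #include <cstddef>
    #include <vector>

    using size_type = std::size_t;   // assumption

    // dst[d][row]   : local storage, one column per constant mode
    // modes[d][gid] : user-supplied modes, indexed by global row
    // global_id     : local-to-global row map (domain_map.GID(row) above)
    void copy_modes(std::vector<std::vector<double> > &dst,
                    const std::vector<std::vector<double> > &modes,
                    const std::vector<size_type> &global_id)
    {
      for (size_type d = 0; d < modes.size(); ++d)
        for (size_type row = 0; row < global_id.size(); ++row)
          dst[d][row] = modes[d][global_id[row]];
    }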
const ::dealii::SparsityPattern *use_this_sparsity)
{
preconditioner.reset();
- const unsigned int n_rows = deal_ii_sparse_matrix.m();
+ const size_type n_rows = deal_ii_sparse_matrix.m();
// Init Epetra Matrix using an
// equidistributed map; avoid
// storing the nonzero
// elements.
- vector_distributor.reset (new Epetra_Map(static_cast<int>(n_rows), 0, communicator));
+ vector_distributor.reset (new Epetra_Map(static_cast<int_type>(n_rows), 0, communicator));
if (trilinos_matrix.get() == 0)
trilinos_matrix.reset (new SparseMatrix());
- std::size_t
+ size_type
PreconditionAMG::memory_consumption() const
{
unsigned int memory = sizeof(this);
// get a representation of the present
// row
- int ncols;
- int colnums = matrix->n();
+ int_type ncols;
+ int_type colnums = matrix->n();
if (value_cache.get() == 0)
{
value_cache.reset (new std::vector<TrilinosScalar> (matrix->n()));
- colnum_cache.reset (new std::vector<unsigned int> (matrix->n()));
+ colnum_cache.reset (new std::vector<size_type> (matrix->n()));
}
else
{
}
int ierr = matrix->trilinos_matrix().
- ExtractGlobalRowCopy((int)this->a_row,
+ ExtractGlobalRowCopy((int_type)this->a_row,
colnums,
ncols, &((*value_cache)[0]),
- reinterpret_cast<int *>(&((*colnum_cache)[0])));
+ reinterpret_cast<int_type *>(&((*colnum_cache)[0])));
value_cache->resize (ncols);
colnum_cache->resize (ncols);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
- const unsigned int n_max_entries_per_row)
+ const size_type n_max_entries_per_row)
:
column_space_map (new Epetra_Map (input_map)),
matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
- int(n_max_entries_per_row), false)),
+ int_type(n_max_entries_per_row), false)),
last_action (Zero),
compressed (false)
{}
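Constructors like the one above narrow a size_type value to Epetra's 32-bit index type; with 64-bit sizes in play, such casts silently overflow past INT_MAX. A hypothetical checked-cast helper that makes the assumption explicit (not part of the patch):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    using size_type = std::uint64_t;   // assumption
    using int_type  = int;             // assumption: Epetra's 32-bit index type

    // fails loudly instead of wrapping when a matrix dimension
    // no longer fits into Epetra's signed 32-bit indices
    inline int_type checked_cast(const size_type n)
    {
      assert(n <= static_cast<size_type>(std::numeric_limits<int_type>::max()));
      return static_cast<int_type>(n);
    }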
SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
:
column_space_map (new Epetra_Map (input_map)),
matrix (new Epetra_FECrsMatrix
(Copy, *column_space_map,
- (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
+ (int_type *)const_cast<size_type *>(&(n_entries_per_row[0])),
false)),
last_action (Zero),
compressed (false)
SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const unsigned int n_max_entries_per_row)
+ const size_type n_max_entries_per_row)
:
column_space_map (new Epetra_Map (input_col_map)),
matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
- int(n_max_entries_per_row), false)),
+ int_type(n_max_entries_per_row), false)),
last_action (Zero),
compressed (false)
{}
SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
:
column_space_map (new Epetra_Map (input_col_map)),
matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
- (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
+ (int_type *)const_cast<size_type *>(&(n_entries_per_row[0])),
false)),
last_action (Zero),
compressed (false)
- SparseMatrix::SparseMatrix (const unsigned int m,
- const unsigned int n,
- const unsigned int n_max_entries_per_row)
+ SparseMatrix::SparseMatrix (const size_type m,
+ const size_type n,
+ const size_type n_max_entries_per_row)
:
- column_space_map (new Epetra_Map (static_cast<int>(n), 0,
+ column_space_map (new Epetra_Map (static_cast<int_type>(n), 0,
Utilities::Trilinos::comm_self())),
// on one processor only, we know how the
// columns of the matrix will be distributed;
// in parallel, the information from columns is only
// available when entries have been added
matrix (new Epetra_FECrsMatrix(Copy,
- Epetra_Map (static_cast<int>(m), 0,
+ Epetra_Map (static_cast<int_type>(m), 0,
Utilities::Trilinos::comm_self()),
*column_space_map,
n_max_entries_per_row,
- SparseMatrix::SparseMatrix (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row)
+ SparseMatrix::SparseMatrix (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row)
:
- column_space_map (new Epetra_Map (static_cast<int>(n), 0,
+ column_space_map (new Epetra_Map (static_cast<int_type>(n), 0,
Utilities::Trilinos::comm_self())),
matrix (new Epetra_FECrsMatrix(Copy,
- Epetra_Map (static_cast<int>(m), 0,
+ Epetra_Map (static_cast<int_type>(m), 0,
Utilities::Trilinos::comm_self()),
*column_space_map,
- (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
+ (int_type *)const_cast<size_type *>(&(n_entries_per_row[0])),
false)),
last_action (Zero),
compressed (false)
SparseMatrix::SparseMatrix (const IndexSet ¶llel_partitioning,
const MPI_Comm &communicator,
- const unsigned int n_max_entries_per_row)
+ const size_type n_max_entries_per_row)
:
column_space_map (new Epetra_Map(parallel_partitioning.
make_trilinos_map(communicator, false))),
SparseMatrix::SparseMatrix (const IndexSet ¶llel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
:
column_space_map (new Epetra_Map(parallel_partitioning.
make_trilinos_map(communicator, false))),
matrix (new Epetra_FECrsMatrix(Copy,
*column_space_map,
- (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
+ (int_type *)const_cast<size_type *>(&(n_entries_per_row[0])),
false)),
last_action (Zero),
compressed (false)
- SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const unsigned int n_max_entries_per_row)
+ SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const size_type n_max_entries_per_row)
:
column_space_map (new Epetra_Map(col_parallel_partitioning.
make_trilinos_map(communicator, false))),
SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
:
column_space_map (new Epetra_Map(col_parallel_partitioning.
make_trilinos_map(communicator, false))),
matrix (new Epetra_FECrsMatrix(Copy,
row_parallel_partitioning.
make_trilinos_map(communicator, false),
- (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
+ (int_type *)const_cast<size_type *>(&(n_entries_per_row[0])),
false)),
last_action (Zero),
compressed (false)
void
SparseMatrix::reinit (const SparsityType &sparsity_pattern)
{
- const Epetra_Map rows (static_cast<int>(sparsity_pattern.n_rows()),
+ const Epetra_Map rows (static_cast<int_type>(sparsity_pattern.n_rows()),
0,
Utilities::Trilinos::comm_self());
- const Epetra_Map columns (static_cast<int>(sparsity_pattern.n_cols()),
+ const Epetra_Map columns (static_cast<int_type>(sparsity_pattern.n_cols()),
0,
Utilities::Trilinos::comm_self());
if (input_row_map.Comm().MyPID() == 0)
{
AssertDimension (sparsity_pattern.n_rows(),
- static_cast<unsigned int>(input_row_map.NumGlobalElements()));
+ static_cast<size_type>(input_row_map.NumGlobalElements()));
AssertDimension (sparsity_pattern.n_cols(),
- static_cast<unsigned int>(input_col_map.NumGlobalElements()));
+ static_cast<size_type>(input_col_map.NumGlobalElements()));
}
column_space_map.reset (new Epetra_Map (input_col_map));
- const unsigned int first_row = input_row_map.MinMyGID(),
+ const size_type first_row = input_row_map.MinMyGID(),
last_row = input_row_map.MaxMyGID()+1;
- std::vector<int> n_entries_per_row(last_row-first_row);
+ std::vector<int_type> n_entries_per_row(last_row-first_row);
- for (unsigned int row=first_row; row<last_row; ++row)
+ for (size_type row=first_row; row<last_row; ++row)
n_entries_per_row[row-first_row] = sparsity_pattern.row_length(row);
// The deal.II notation of a Sparsity
// distributed.
// now insert the indices
- std::vector<int> row_indices;
+ std::vector<int_type> row_indices;
- for (unsigned int row=first_row; row<last_row; ++row)
+ for (size_type row=first_row; row<last_row; ++row)
{
- const int row_length = sparsity_pattern.row_length(row);
+ const int_type row_length = sparsity_pattern.row_length(row);
if (row_length == 0)
continue;
typename SparsityType::row_iterator col_num = sparsity_pattern.row_begin (row),
row_end = sparsity_pattern.row_end(row);
- for (unsigned int col = 0; col_num != row_end; ++col_num, ++col)
+ for (size_type col = 0; col_num != row_end; ++col_num, ++col)
row_indices[col] = *col_num;
graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
// check whether we got the number of
// columns right.
AssertDimension (sparsity_pattern.n_cols(),
- static_cast<unsigned int>(graph->NumGlobalCols()));
+ static_cast<size_type>(graph->NumGlobalCols()));
// And now finally generate the matrix.
matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
const bool copy_values,
const ::dealii::SparsityPattern *use_this_sparsity)
{
- const Epetra_Map rows (static_cast<int>(dealii_sparse_matrix.m()),
+ const Epetra_Map rows (static_cast<int_type>(dealii_sparse_matrix.m()),
0,
Utilities::Trilinos::comm_self());
- const Epetra_Map columns (static_cast<int>(dealii_sparse_matrix.n()),
+ const Epetra_Map columns (static_cast<int_type>(dealii_sparse_matrix.n()),
0,
Utilities::Trilinos::comm_self());
reinit (rows, columns, dealii_sparse_matrix, drop_tolerance,
return;
}
- const unsigned int n_rows = dealii_sparse_matrix.m();
+ const size_type n_rows = dealii_sparse_matrix.m();
- Assert (input_row_map.NumGlobalElements() == (int)n_rows,
+ Assert (input_row_map.NumGlobalElements() == (int_type)n_rows,
ExcDimensionMismatch (input_row_map.NumGlobalElements(),
n_rows));
- Assert (input_col_map.NumGlobalElements() == (int)dealii_sparse_matrix.n(),
+ Assert (input_col_map.NumGlobalElements() == (int_type)dealii_sparse_matrix.n(),
ExcDimensionMismatch (input_col_map.NumGlobalElements(),
dealii_sparse_matrix.n()));
// and the specified sparsity pattern might be different, need to go
// through the row for both these sparsity structures simultaneously in
// order to really set the correct values.
- unsigned int maximum_row_length = matrix->MaxNumEntries();
- std::vector<unsigned int> row_indices (maximum_row_length);
+ size_type maximum_row_length = matrix->MaxNumEntries();
+ std::vector<size_type> row_indices (maximum_row_length);
std::vector<TrilinosScalar> values (maximum_row_length);
- for (unsigned int row=0; row<n_rows; ++row)
+ for (size_type row=0; row<n_rows; ++row)
// see if the row is locally stored on this processor
- if (input_row_map.MyGID(static_cast<int>(row)) == true)
+ if (input_row_map.MyGID(static_cast<int_type>(row)) == true)
{
::dealii::SparsityPattern::iterator select_index =
sparsity_pattern.begin(row);
typename ::dealii::SparseMatrix<number>::const_iterator it =
dealii_sparse_matrix.begin(row);
- unsigned int col = 0;
+ size_type col = 0;
if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
{
// optimized diagonal
++select_index;
++it;
}
- set (row, col, reinterpret_cast<unsigned int *>(&row_indices[0]),
+ set (row, col, reinterpret_cast<size_type *>(&row_indices[0]),
&values[0], false);
}
// matrices and copy the content
const TrilinosScalar *in_values = input_matrix[0];
TrilinosScalar *values = (*matrix)[0];
- const unsigned int my_nonzeros = input_matrix.NumMyNonzeros();
+ const size_type my_nonzeros = input_matrix.NumMyNonzeros();
std::memcpy (&values[0], &in_values[0],
my_nonzeros*sizeof (TrilinosScalar));
}
void
- SparseMatrix::clear_row (const unsigned int row,
+ SparseMatrix::clear_row (const size_type row,
const TrilinosScalar new_diag_value)
{
Assert (matrix->Filled()==true, ExcMatrixNotCompressed());
// Only do this on the rows owned
// locally on this processor.
- int local_row = matrix->LRID(static_cast<int>(row));
+ int_type local_row = matrix->LRID(static_cast<int_type>(row));
if (local_row >= 0)
{
TrilinosScalar *values;
- int *col_indices;
- int num_entries;
+ int_type *col_indices;
+ int_type num_entries;
const int ierr = matrix->ExtractMyRowView(local_row, num_entries,
values, col_indices);
Assert (ierr == 0,
ExcTrilinosError(ierr));
- int *diag_find = std::find(col_indices,col_indices+num_entries,
+ int_type *diag_find = std::find(col_indices,col_indices+num_entries,
local_row);
- int diag_index = (int)(diag_find - col_indices);
+ int_type diag_index = (int_type)(diag_find - col_indices);
- for (int j=0; j<num_entries; ++j)
+ for (int_type j=0; j<num_entries; ++j)
if (diag_index != j || new_diag_value == 0)
values[j] = 0.;
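clear_row zeroes every entry of a locally owned row but skips the diagonal position when a replacement diagonal value was requested, so that value can be written afterwards. A sketch of just that loop, with a plain array in place of the Epetra row view:

    // values      : entries of one locally owned row
    // num_entries : number of entries in that row
    // diag_index  : position of the diagonal entry within the row
    void zero_row(double *values, const int num_entries,
                  const int diag_index, const double new_diag_value)
    {
      for (int j = 0; j < num_entries; ++j)
        if (diag_index != j || new_diag_value == 0)
          values[j] = 0.;
      // the new diagonal value itself is written in a separate step
    }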
void
- SparseMatrix::clear_rows (const std::vector<unsigned int> &rows,
- const TrilinosScalar new_diag_value)
+ SparseMatrix::clear_rows (const std::vector<size_type> &rows,
+ const TrilinosScalar new_diag_value)
{
compress();
- for (unsigned int row=0; row<rows.size(); ++row)
+ for (size_type row=0; row<rows.size(); ++row)
clear_row(rows[row], new_diag_value);
// This function needs to be called
TrilinosScalar
- SparseMatrix::operator() (const unsigned int i,
- const unsigned int j) const
+ SparseMatrix::operator() (const size_type i,
+ const size_type j) const
{
// Extract local indices in
// the matrix.
- int trilinos_i = matrix->LRID(static_cast<int>(i)),
- trilinos_j = matrix->LCID(static_cast<int>(j));
+ int_type trilinos_i = matrix->LRID(static_cast<int_type>(i)),
+ trilinos_j = matrix->LCID(static_cast<int_type>(j));
TrilinosScalar value = 0.;
// If the data is not on the
// Prepare pointers for extraction
// of a view of the row.
- int nnz_present = matrix->NumMyEntries(trilinos_i);
- int nnz_extracted;
- int *col_indices;
+ int_type nnz_present = matrix->NumMyEntries(trilinos_i);
+ int_type nnz_extracted;
+ int_type *col_indices;
TrilinosScalar *values;
// Generate the view and make
// look for the value, and then
// finally get it.
- int *el_find = std::find(col_indices, col_indices + nnz_present,
+ int_type *el_find = std::find(col_indices, col_indices + nnz_present,
trilinos_j);
- int local_col_index = (int)(el_find - col_indices);
+ int_type local_col_index = (int_type)(el_find - col_indices);
// This is actually the only
// difference to the el(i,j)
TrilinosScalar
- SparseMatrix::el (const unsigned int i,
- const unsigned int j) const
+ SparseMatrix::el (const size_type i,
+ const size_type j) const
{
// Extract local indices in
// the matrix.
- int trilinos_i = matrix->LRID(static_cast<int>(i)), trilinos_j = matrix->LCID(static_cast<int>(j));
+ int_type trilinos_i = matrix->LRID(static_cast<int_type>(i)), trilinos_j = matrix->LCID(static_cast<int_type>(j));
TrilinosScalar value = 0.;
// If the data is not on the
// Prepare pointers for extraction
// of a view of the row.
- int nnz_present = matrix->NumMyEntries(trilinos_i);
- int nnz_extracted;
- int *col_indices;
+ int_type nnz_present = matrix->NumMyEntries(trilinos_i);
+ int_type nnz_extracted;
+ int_type *col_indices;
TrilinosScalar *values;
// Generate the view and make
// Search the index where we
// look for the value, and then
// finally get it.
- int *el_find = std::find(col_indices, col_indices + nnz_present,
+ int_type *el_find = std::find(col_indices, col_indices + nnz_present,
trilinos_j);
- int local_col_index = (int)(el_find - col_indices);
+ int_type local_col_index = (int_type)(el_find - col_indices);
// This is actually the only
TrilinosScalar
- SparseMatrix::diag_element (const unsigned int i) const
+ SparseMatrix::diag_element (const size_type i) const
{
Assert (m() == n(), ExcNotQuadratic());
- unsigned int
- SparseMatrix::row_length (const unsigned int row) const
+ size_type
+ SparseMatrix::row_length (const size_type row) const
{
Assert (row < m(), ExcInternalError());
// get a representation of the
// present row
- int ncols = -1;
- int local_row = matrix->LRID(static_cast<int>(row));
+ int_type ncols = -1;
+ int_type local_row = matrix->LRID(static_cast<int_type>(row));
// on the processor who owns this
// row, we'll have a non-negative
ExcMessage ("Parallel distribution of matrix B and vector V "
"does not match."));
- const int local_N = inputright.local_size();
- for (int i=0; i<local_N; ++i)
+ const int_type local_N = inputright.local_size();
+ for (int_type i=0; i<local_N; ++i)
{
- int N_entries = -1;
+ int_type N_entries = -1;
double *new_data, *B_data;
mod_B->ExtractMyRowView (i, N_entries, new_data);
inputright.trilinos_matrix().ExtractMyRowView (i, N_entries, B_data);
double value = V.trilinos_vector()[0][i];
- for (int j=0; j<N_entries; ++j)
+ for (int_type j=0; j<N_entries; ++j)
new_data[j] = value * B_data[j];
}
}
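The loop above forms diag(V) * B by scaling every entry of local row i of B with the i-th local vector entry. The same pattern on plain arrays (illustrative only):

    // rows[i]        : entries of local row i of B
    // row_lengths[i] : number of entries in that row
    // v[i]           : i-th local entry of the vector V
    void scale_rows(double *const *rows, const int *row_lengths,
                    const double *v, const int n_local_rows)
    {
      for (int i = 0; i < n_local_rows; ++i)
        for (int j = 0; j < row_lengths[i]; ++j)
          rows[i][j] *= v[i];
    }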
inputleft.range_partitioner());
Assert (inputleft.domain_partitioner().LinearMap() == true,
ExcMessage("Matrix must be partitioned contiguously between procs."));
- for (unsigned int i=0; i<inputleft.local_size(); ++i)
+ for (size_type i=0; i<inputleft.local_size(); ++i)
{
- int num_entries, * indices;
+ int_type num_entries, * indices;
inputleft.trilinos_sparsity_pattern().ExtractMyRowView(i, num_entries,
indices);
Assert (num_entries >= 0, ExcInternalError());
- const unsigned int GID = inputleft.row_partitioner().GID(i);
- for (int j=0; j<num_entries; ++j)
+ const size_type GID = inputleft.row_partitioner().GID(i);
+ for (int_type j=0; j<num_entries; ++j)
sparsity_transposed.add (inputleft.col_partitioner().GID(indices[j]),
GID);
}
sparsity_transposed.compress();
transposed_mat.reinit (sparsity_transposed);
- for (unsigned int i=0; i<inputleft.local_size(); ++i)
+ for (size_type i=0; i<inputleft.local_size(); ++i)
{
- int num_entries, * indices;
+ int_type num_entries, * indices;
double *values;
inputleft.trilinos_matrix().ExtractMyRowView(i, num_entries,
values, indices);
Assert (num_entries >= 0, ExcInternalError());
- const unsigned int GID = inputleft.row_partitioner().GID(i);
- for (int j=0; j<num_entries; ++j)
+ const size_type GID = inputleft.row_partitioner().GID(i);
+ for (int_type j=0; j<num_entries; ++j)
transposed_mat.set (inputleft.col_partitioner().GID(indices[j]),
GID, values[j]);
}
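Transposition happens in two passes: first every stored position (i, j) is added as (j, i) to build the transposed sparsity, then the values are copied the same way. A sketch of the index swap on a plain coordinate list (illustrative):

    #include <cstddef>
    #include <utility>
    #include <vector>

    typedef std::pair<long, long> entry;   // (row, col)

    std::vector<entry> transpose_entries(const std::vector<entry> &in)
    {
      std::vector<entry> out;
      out.reserve(in.size());
      for (std::size_t k = 0; k < in.size(); ++k)
        out.push_back(entry(in[k].second, in[k].first));   // swap (i,j) -> (j,i)
      return out;
    }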
// import data if necessary
ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr;
ML_CommInfoOP *getrow_comm;
- int max_per_proc;
- int N_input_vector = B_->invec_leng;
+ int_type max_per_proc;
+ int_type N_input_vector = B_->invec_leng;
getrow_comm = B_->getrow->pre_comm;
if ( getrow_comm != NULL)
- for (int i = 0; i < getrow_comm->N_neighbors; i++)
- for (int j = 0; j < getrow_comm->neighbors[i].N_send; j++)
+ for (int_type i = 0; i < getrow_comm->N_neighbors; i++)
+ for (int_type j = 0; j < getrow_comm->neighbors[i].N_send; j++)
AssertThrow (getrow_comm->neighbors[i].send_list[j] < N_input_vector,
ExcInternalError());
Assert (rhs.m() == m(), ExcDimensionMismatch (rhs.m(), m()));
Assert (rhs.n() == n(), ExcDimensionMismatch (rhs.n(), n()));
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
local_range = rhs.local_range();
int ierr;
rhs.matrix->Filled() == true &&
this->local_range() == local_range &&
matrix->NumMyNonzeros() == rhs.matrix->NumMyNonzeros())
- for (unsigned int row=local_range.first;
+ for (size_type row=local_range.first;
row < local_range.second; ++row)
{
Assert (matrix->NumGlobalEntries(row) ==
ExcDimensionMismatch(matrix->NumGlobalEntries(row),
rhs.matrix->NumGlobalEntries(row)));
- const int row_local = matrix->RowMap().LID(static_cast<int>(row));
- int n_entries, rhs_n_entries;
+ const int_type row_local = matrix->RowMap().LID(static_cast<int_type>(row));
+ int_type n_entries, rhs_n_entries;
TrilinosScalar *value_ptr, *rhs_value_ptr;
// In debug mode, we want to check
// indices is relatively slow compared to
// just working with the values.
#ifdef DEBUG
- int *index_ptr, *rhs_index_ptr;
+ int_type *index_ptr, *rhs_index_ptr;
ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries,
rhs_value_ptr, rhs_index_ptr);
Assert (ierr == 0, ExcTrilinosError(ierr));
AssertThrow (n_entries == rhs_n_entries,
ExcDimensionMismatch (n_entries, rhs_n_entries));
- for (int i=0; i<n_entries; ++i)
+ for (int_type i=0; i<n_entries; ++i)
{
*value_ptr++ += *rhs_value_ptr++ * factor;
#ifdef DEBUG
// respective add() function.
else
{
- unsigned int max_row_length = 0;
- for (unsigned int row=local_range.first;
+ size_type max_row_length = 0;
+ for (size_type row=local_range.first;
row < local_range.second; ++row)
max_row_length
= std::max (max_row_length,
- static_cast<unsigned int>(rhs.matrix->NumGlobalEntries(row)));
+ static_cast<size_type>(rhs.matrix->NumGlobalEntries(row)));
- std::vector<int> column_indices (max_row_length);
+ std::vector<int_type> column_indices (max_row_length);
std::vector<TrilinosScalar> values (max_row_length);
if (matrix->Filled() == true && rhs.matrix->Filled() == true &&
this->local_range() == local_range)
- for (unsigned int row=local_range.first;
+ for (size_type row=local_range.first;
row < local_range.second; ++row)
{
- const int row_local = matrix->RowMap().LID(static_cast<int>(row));
- int n_entries;
+ const int_type row_local = matrix->RowMap().LID(static_cast<int_type>(row));
+ int_type n_entries;
ierr = rhs.matrix->ExtractMyRowCopy (row_local, max_row_length,
n_entries,
&column_indices[0]);
Assert (ierr == 0, ExcTrilinosError(ierr));
- for (int i=0; i<n_entries; ++i)
+ for (int_type i=0; i<n_entries; ++i)
values[i] *= factor;
TrilinosScalar *value_ptr = &values[0];
}
else
{
- for (unsigned int row=local_range.first;
+ for (size_type row=local_range.first;
row < local_range.second; ++row)
{
- int n_entries;
+ int_type n_entries;
ierr = rhs.matrix->Epetra_CrsMatrix::ExtractGlobalRowCopy
- ((int)row, max_row_length, n_entries, &values[0], &column_indices[0]);
+ ((int_type)row, max_row_length, n_entries, &values[0], &column_indices[0]);
Assert (ierr == 0, ExcTrilinosError(ierr));
- for (int i=0; i<n_entries; ++i)
+ for (int_type i=0; i<n_entries; ++i)
values[i] *= factor;
ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues
- ((int)row, n_entries, &values[0], &column_indices[0]);
+ ((int_type)row, n_entries, &values[0], &column_indices[0]);
Assert (ierr == 0, ExcTrilinosError(ierr));
}
compress ();
else
{
double *values;
- int *indices;
- int num_entries;
+ int_type *indices;
+ int_type num_entries;
- for (int i=0; i<matrix->NumMyRows(); ++i)
+ for (int_type i=0; i<matrix->NumMyRows(); ++i)
{
matrix->ExtractMyRowView (i, num_entries, values, indices);
- for (int j=0; j<num_entries; ++j)
+ for (int_type j=0; j<num_entries; ++j)
out << "(" << matrix->GRID(i) << "," << matrix->GCID(indices[j]) << ") "
<< values[j] << std::endl;
}
- std::size_t
+ size_type
SparseMatrix::memory_consumption () const
{
- unsigned int static_memory = sizeof(this) + sizeof (*matrix)
+ size_type static_memory = sizeof(this) + sizeof (*matrix)
+ sizeof(*matrix->Graph().DataPtr());
- return ((sizeof(TrilinosScalar)+sizeof(int))*matrix->NumMyNonzeros() +
+ return ((sizeof(TrilinosScalar)+sizeof(int_type))*matrix->NumMyNonzeros() +
sizeof(int)*local_size() +
static_memory);
}
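The estimate above charges one scalar plus one column index per stored nonzero, plus a per-row index and the fixed object overhead. A back-of-envelope version with assumed numbers:

    #include <cstdio>

    int main()
    {
      const unsigned long nnz = 1000000UL;          // assumed local nonzero count
      const unsigned long bytes =
        (sizeof(double) + sizeof(int)) * nnz;       // one scalar + one column index
      std::printf("approx. %lu bytes\n", bytes);    // ~12 MB for 1e6 entries
      return 0;
    }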
int colnums = sparsity_pattern->n_cols();
int ierr;
- ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((int)this->a_row,
+ ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((int_type)this->a_row,
colnums,
ncols,
- (int *)&(*colnum_cache)[0]);
+ (int_type *)&(*colnum_cache)[0]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
// copy it into our caches if the
// iterator for an empty line (what
// would it point to?)
Assert (ncols != 0, ExcInternalError());
- colnum_cache.reset (new std::vector<unsigned int> (colnums,
+ colnum_cache.reset (new std::vector<size_type> (colnums,
colnums+ncols));
}
}
SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
- const unsigned int n_entries_per_row)
+ const size_type n_entries_per_row)
{
reinit (input_map, input_map, n_entries_per_row);
}
- SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
- const std::vector<unsigned int> &n_entries_per_row)
+ SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
+ const std::vector<size_type> &n_entries_per_row)
{
reinit (input_map, input_map, n_entries_per_row);
}
SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const unsigned int n_entries_per_row)
+ const size_type n_entries_per_row)
{
reinit (input_row_map, input_col_map, n_entries_per_row);
}
SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
{
reinit (input_row_map, input_col_map, n_entries_per_row);
}
- SparsityPattern::SparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int n_entries_per_row)
+ SparsityPattern::SparsityPattern (const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row)
{
reinit (m, n, n_entries_per_row);
}
- SparsityPattern::SparsityPattern (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row)
+ SparsityPattern::SparsityPattern (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row)
{
reinit (m, n, n_entries_per_row);
}
void
- SparsityPattern::reinit (const Epetra_Map &input_map,
- const unsigned int n_entries_per_row)
+ SparsityPattern::reinit (const Epetra_Map &input_map,
+ const size_type n_entries_per_row)
{
reinit (input_map, input_map, n_entries_per_row);
}
void
- SparsityPattern::reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int n_entries_per_row)
+ SparsityPattern::reinit (const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row)
{
- const Epetra_Map rows (static_cast<int>(m), 0, Utilities::Trilinos::comm_self());
- const Epetra_Map columns (static_cast<int>(n), 0, Utilities::Trilinos::comm_self());
+ const Epetra_Map rows (static_cast<int_type>(m), 0, Utilities::Trilinos::comm_self());
+ const Epetra_Map columns (static_cast<int_type>(n), 0, Utilities::Trilinos::comm_self());
reinit (rows, columns, n_entries_per_row);
}
void
- SparsityPattern::reinit (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
- const unsigned int n_entries_per_row)
+ SparsityPattern::reinit (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
+ const size_type n_entries_per_row)
{
graph.reset ();
column_space_map.reset (new Epetra_Map (input_col_map));
void
SparsityPattern::reinit (const Epetra_Map &input_map,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
{
reinit (input_map, input_map, n_entries_per_row);
}
void
- SparsityPattern::reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row)
+ SparsityPattern::reinit (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row)
{
- const Epetra_Map rows (static_cast<int>(m), 0, Utilities::Trilinos::comm_self());
- const Epetra_Map columns (static_cast<int>(n), 0, Utilities::Trilinos::comm_self());
+ const Epetra_Map rows (static_cast<int_type>(m), 0, Utilities::Trilinos::comm_self());
+ const Epetra_Map columns (static_cast<int_type>(n), 0, Utilities::Trilinos::comm_self());
reinit (rows, columns, n_entries_per_row);
}
void
SparsityPattern::reinit (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const std::vector<unsigned int> &n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
{
// release memory before reallocation
graph.reset ();
AssertDimension (n_entries_per_row.size(),
- static_cast<unsigned int>(input_row_map.NumGlobalElements()));
+ static_cast<size_type>(input_row_map.NumGlobalElements()));
column_space_map.reset (new Epetra_Map (input_col_map));
compressed = false;
graph.reset ();
AssertDimension (sp.n_rows(),
- static_cast<unsigned int>(input_row_map.NumGlobalElements()));
+ static_cast<size_type>(input_row_map.NumGlobalElements()));
AssertDimension (sp.n_cols(),
- static_cast<unsigned int>(input_col_map.NumGlobalElements()));
+ static_cast<size_type>(input_col_map.NumGlobalElements()));
column_space_map.reset (new Epetra_Map (input_col_map));
compressed = false;
Assert (input_row_map.LinearMap() == true,
ExcMessage ("This function is not efficient if the map is not contiguous."));
- const unsigned int first_row = input_row_map.MinMyGID(),
+ const size_type first_row = input_row_map.MinMyGID(),
last_row = input_row_map.MaxMyGID()+1;
- std::vector<int> n_entries_per_row(last_row - first_row);
+ std::vector<int_type> n_entries_per_row(last_row - first_row);
- for (unsigned int row=first_row; row<last_row; ++row)
+ for (size_type row=first_row; row<last_row; ++row)
n_entries_per_row[row-first_row] = sp.row_length(row);
if (input_row_map.Comm().NumProc() > 1)
false));
AssertDimension (sp.n_rows(),
- static_cast<unsigned int>(graph->NumGlobalRows()));
+ static_cast<size_type>(graph->NumGlobalRows()));
- std::vector<int> row_indices;
+ std::vector<int_type> row_indices;
// Include possibility to exchange data
// since CompressedSimpleSparsityPattern is
// able to do so
if (exchange_data==false)
- for (unsigned int row=first_row; row<last_row; ++row)
+ for (size_type row=first_row; row<last_row; ++row)
{
- const int row_length = sp.row_length(row);
+ const int_type row_length = sp.row_length(row);
if (row_length == 0)
continue;
typename SparsityType::row_iterator col_num = sp.row_begin (row),
row_end = sp.row_end(row);
- for (unsigned int col = 0; col_num != row_end; ++col_num, ++col)
+ for (size_type col = 0; col_num != row_end; ++col_num, ++col)
row_indices[col] = *col_num;
graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
&row_indices[0]);
}
else
- for (unsigned int row=0; row<sp.n_rows(); ++row)
+ for (size_type row=0; row<sp.n_rows(); ++row)
{
- const int row_length = sp.row_length(row);
+ const int_type row_length = sp.row_length(row);
if (row_length == 0)
continue;
typename SparsityType::row_iterator col_num = sp.row_begin (row),
row_end = sp.row_end(row);
- for (unsigned int col = 0; col_num != row_end; ++col_num, ++col)
+ for (size_type col = 0; col_num != row_end; ++col_num, ++col)
row_indices[col] = *col_num;
- graph->InsertGlobalIndices (1, reinterpret_cast<int *>(&row), row_length,
- &row_indices[0]);
+ graph->InsertGlobalIndices (1, reinterpret_cast<int_type *>(&row),
+ row_length, &row_indices[0]);
}
compress();
void
SparsityPattern::copy_from (const SparsityType &sp)
{
- const Epetra_Map rows (static_cast<int>(sp.n_rows()), 0, Utilities::Trilinos::comm_self());
- const Epetra_Map columns (static_cast<int>(sp.n_cols()), 0, Utilities::Trilinos::comm_self());
+ const Epetra_Map rows (static_cast<int_type>(sp.n_rows()), 0, Utilities::Trilinos::comm_self());
+ const Epetra_Map columns (static_cast<int_type>(sp.n_cols()), 0, Utilities::Trilinos::comm_self());
reinit (rows, columns, sp);
}
bool
- SparsityPattern::exists (const unsigned int i,
- const unsigned int j) const
+ SparsityPattern::exists (const size_type i,
+ const size_type j) const
{
// Extract local indices in
// the matrix.
- int trilinos_i = graph->LRID(static_cast<int>(i)), trilinos_j = graph->LCID(static_cast<int>(j));
+ int_type trilinos_i = graph->LRID(static_cast<int_type>(i)), trilinos_j = graph->LCID(static_cast<int_type>(j));
// If the data is not on the
// present processor, we throw
// local indices.
if (graph->Filled() == false)
{
- int nnz_present = graph->NumGlobalIndices(i);
- int nnz_extracted;
- int *col_indices;
+ int_type nnz_present = graph->NumGlobalIndices(i);
+ int_type nnz_extracted;
+ int_type *col_indices;
// Generate the view and make
// sure that we have not generated
ExcDimensionMismatch(nnz_present, nnz_extracted));
// Search the index
- int *el_find = std::find(col_indices, col_indices + nnz_present,
+ int_type *el_find = std::find(col_indices, col_indices + nnz_present,
trilinos_j);
- int local_col_index = (int)(el_find - col_indices);
+ int_type local_col_index = (int_type)(el_find - col_indices);
if (local_col_index == nnz_present)
return false;
{
// Prepare pointers for extraction
// of a view of the row.
- int nnz_present = graph->NumGlobalIndices(i);
- int nnz_extracted;
- int *col_indices;
+ int_type nnz_present = graph->NumGlobalIndices(i);
+ int_type nnz_extracted;
+ int_type *col_indices;
// Generate the view and make
// sure that we have not generated
ExcDimensionMismatch(nnz_present, nnz_extracted));
// Search the index
- int *el_find = std::find(col_indices, col_indices + nnz_present,
+ int_type *el_find = std::find(col_indices, col_indices + nnz_present,
trilinos_j);
- int local_col_index = (int)(el_find - col_indices);
+ int_type local_col_index = (int_type)(el_find - col_indices);
if (local_col_index == nnz_present)
return false;
- unsigned int
+ size_type
SparsityPattern::bandwidth () const
{
- unsigned int local_b=0;
- int global_b=0;
- for (unsigned int i=0; i<local_size(); ++i)
+ size_type local_b=0;
+ int_type global_b=0;
+ for (size_type i=0; i<local_size(); ++i)
{
- int *indices;
- int num_entries;
+ int_type *indices;
+ int_type num_entries;
graph->ExtractMyRowView(i, num_entries, indices);
- for (unsigned int j=0; j<(unsigned int)num_entries; ++j)
+ for (size_type j=0; j<(size_type)num_entries; ++j)
{
- if (static_cast<unsigned int>(std::abs(static_cast<int>(i-indices[j]))) > local_b)
- local_b = std::abs(static_cast<signed int>(i-indices[j]));
+ if (static_cast<size_type>(std::abs(static_cast<int_type>(i-indices[j]))) > local_b)
+ local_b = std::abs(static_cast<int_type>(i-indices[j]));
}
}
- graph->Comm().MaxAll((int *)&local_b, &global_b, 1);
- return static_cast<unsigned int>(global_b);
+ graph->Comm().MaxAll((int_type *)&local_b, &global_b, 1);
+ return static_cast<size_type>(global_b);
}
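bandwidth() takes the maximum |i - j| over all locally stored entries and then reduces with MaxAll across processors; the difference must be formed in a signed type so that unsigned wrap-around cannot inflate the result. A sketch of the local part (plain arrays, illustrative names):

    #include <cstdlib>

    // row     : global index of the current local row
    // indices : global column indices stored in that row
    unsigned long local_bandwidth(const unsigned long row,
                                  const long long *indices, const int n)
    {
      unsigned long b = 0;
      for (int j = 0; j < n; ++j)
        {
          // form the distance in a signed type to avoid unsigned wrap-around
          const long long d =
            std::llabs(static_cast<long long>(row) - indices[j]);
          if (static_cast<unsigned long>(d) > b)
            b = static_cast<unsigned long>(d);
        }
      return b;
    }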
- unsigned int
+ size_type
SparsityPattern::n_rows () const
{
- const int n_rows = graph -> NumGlobalRows();
+ const int_type n_rows = graph -> NumGlobalRows();
return n_rows;
}
- unsigned int
+ size_type
SparsityPattern::n_cols () const
{
- int n_cols;
+ int_type n_cols;
if (graph->Filled() == true)
n_cols = graph -> NumGlobalCols();
else
- unsigned int
+ size_type
SparsityPattern::local_size () const
{
- int n_rows = graph -> NumMyRows();
+ int_type n_rows = graph -> NumMyRows();
return n_rows;
}
- std::pair<unsigned int, unsigned int>
+ std::pair<size_type, size_type>
SparsityPattern::local_range () const
{
- unsigned int begin, end;
+ size_type begin, end;
begin = graph -> RowMap().MinMyGID();
end = graph -> RowMap().MaxMyGID()+1;
- unsigned int
+ size_type
SparsityPattern::n_nonzero_elements () const
{
- int nnz = graph->NumGlobalEntries();
+ int_type nnz = graph->NumGlobalEntries();
- return static_cast<unsigned int>(nnz);
+ return static_cast<size_type>(nnz);
}
- unsigned int
+ size_type
SparsityPattern::max_entries_per_row () const
{
- int nnz = graph->MaxNumIndices();
+ int_type nnz = graph->MaxNumIndices();
- return static_cast<unsigned int>(nnz);
+ return static_cast<size_type>(nnz);
}
- unsigned int
- SparsityPattern::row_length (const unsigned int row) const
+ size_type
+ SparsityPattern::row_length (const size_type row) const
{
Assert (row < n_rows(), ExcInternalError());
// get a representation of the
// present row
- int ncols = -1;
- int local_row = graph->LRID(static_cast<int>(row));
+ int_type ncols = -1;
+ int_type local_row = graph->LRID(static_cast<int_type>(row));
// on the processor who owns this
// row, we'll have a non-negative
if (local_row >= 0)
ncols = graph->NumMyIndices (local_row);
- return static_cast<unsigned int>(ncols);
+ return static_cast<size_type>(ncols);
}
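row_length() first maps the global row number to a local one via LRID, which returns a negative value for rows not stored on this processor. The same lookup pattern, sketched with a std::map in place of the Epetra map (illustrative only):

    #include <cstdint>
    #include <map>

    typedef std::uint64_t size_type;   // assumption
    typedef std::int64_t  int_type;    // assumption

    // local_rows maps a globally numbered row to its entry count;
    // like LRID(), the lookup signals "not owned here" with -1
    int_type row_length(const std::map<size_type, int_type> &local_rows,
                        const size_type row)
    {
      const std::map<size_type, int_type>::const_iterator it = local_rows.find(row);
      return (it == local_rows.end()) ? -1 : it->second;
    }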
out << *graph;
else
{
- int *indices;
- int num_entries;
+ int_type *indices;
+ int_type num_entries;
- for (int i=0; i<graph->NumMyRows(); ++i)
+ for (int_type i=0; i<graph->NumMyRows(); ++i)
{
graph->ExtractMyRowView (i, num_entries, indices);
- for (int j=0; j<num_entries; ++j)
+ for (int_type j=0; j<num_entries; ++j)
out << "(" << i << "," << indices[graph->GRID(j)] << ") "
<< std::endl;
}
SparsityPattern::print_gnuplot (std::ostream &out) const
{
Assert (graph->Filled() == true, ExcInternalError());
- for (unsigned int row=0; row<local_size(); ++row)
+ for (size_type row=0; row<local_size(); ++row)
{
- signed int *indices;
- int num_entries;
+ int_type *indices;
+ int_type num_entries;
graph->ExtractMyRowView (row, num_entries, indices);
- for (unsigned int j=0; j<(unsigned int)num_entries; ++j)
+ for (size_type j=0; j<(size_type)num_entries; ++j)
// while matrix entries are usually
// written (i,j), with i vertical and
// j horizontal, gnuplot output is
// x-y, that is we have to exchange
// the order of output
- out << indices[graph->GRID(static_cast<int>(j))] << " " << -static_cast<signed int>(row)
+ out << indices[graph->GRID(static_cast<int_type>(j))] << " " << -static_cast<int_type>(row)
<< std::endl;
}
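print_gnuplot writes each nonzero (row, col) as the point "col  -row", so the plotted pattern has rows growing downwards as in the usual matrix picture. The same convention on plain data:

    #include <ostream>

    // writes one row of the pattern in gnuplot's x-y convention:
    // x = column index, y = -row, so the matrix plots top-down
    void print_gnuplot_row(std::ostream &out, const long row,
                           const long *cols, const int n)
    {
      for (int j = 0; j < n; ++j)
        out << cols[j] << ' ' << -row << '\n';
    }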
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_USE_TRILINOS
// create a vector that holds all the elements
// contained in the block vector. need to
// manually create an Epetra_Map.
- unsigned int n_elements = 0, added_elements = 0, block_offset = 0;
- for (unsigned int block=0; block<v.n_blocks(); ++block)
+ size_type n_elements = 0, added_elements = 0, block_offset = 0;
+ for (size_type block=0; block<v.n_blocks(); ++block)
n_elements += v.block(block).local_size();
- std::vector<int> global_ids (n_elements, -1);
- for (unsigned int block=0; block<v.n_blocks(); ++block)
+ std::vector<int_type> global_ids (n_elements, -1);
+ for (size_type block=0; block<v.n_blocks(); ++block)
{
- int *glob_elements = v.block(block).vector_partitioner().MyGlobalElements();
- for (unsigned int i=0; i<v.block(block).local_size(); ++i)
+ int_type *glob_elements =
+ v.block(block).vector_partitioner().MyGlobalElements();
+ for (size_type i=0; i<v.block(block).local_size(); ++i)
global_ids[added_elements++] = glob_elements[i] + block_offset;
block_offset += v.block(block).size();
}
TrilinosScalar *entries = (*actual_vec)[0];
block_offset = 0;
- for (unsigned int block=0; block<v.n_blocks(); ++block)
+ for (size_type block=0; block<v.n_blocks(); ++block)
{
v.block(block).trilinos_vector().ExtractCopy (entries, 0);
entries += v.block(block).local_size();
if (import_data == true)
{
- AssertThrow (static_cast<unsigned int>(actual_vec->GlobalLength())
+ AssertThrow (static_cast<size_type>(actual_vec->GlobalLength())
== v.size(),
ExcDimensionMismatch (actual_vec->GlobalLength(),
v.size()));
- Vector::Vector (const unsigned int n)
+ Vector::Vector (const size_type n)
{
last_action = Zero;
- Epetra_LocalMap map ((int)n, 0, Utilities::Trilinos::comm_self());
+ Epetra_LocalMap map ((int_type)n, 0, Utilities::Trilinos::comm_self());
vector.reset (new Epetra_FEVector (map));
}
const MPI_Comm &communicator)
{
last_action = Zero;
- Epetra_LocalMap map (static_cast<int>(partitioning.size()),
+ Epetra_LocalMap map (static_cast<int_type>(partitioning.size()),
0,
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
Epetra_MpiComm(communicator));
void
- Vector::reinit (const unsigned int n,
- const bool fast)
+ Vector::reinit (const size_type n,
+ const bool fast)
{
if (size() != n)
{
- Epetra_LocalMap map ((int)n, 0,
+ Epetra_LocalMap map ((int_type)n, 0,
Utilities::Trilinos::comm_self());
vector.reset (new Epetra_FEVector (map));
}
const bool fast)
{
if (vector->Map().NumGlobalElements() !=
- static_cast<int>(partitioning.size()))
+ static_cast<int_type>(partitioning.size()))
{
- Epetra_LocalMap map (static_cast<int>(partitioning.size()),
+ Epetra_LocalMap map (static_cast<int_type>(partitioning.size()),
0,
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
Epetra_MpiComm(communicator));
// we can use []. Note that we
// can only get local values.
- const int local_index = vector.vector->Map().LID(static_cast<int>(index));
+ const int_type local_index =
+ vector.vector->Map().LID(static_cast<int_type>(index));
Assert (local_index >= 0,
ExcAccessToNonLocalElement (index,
vector.vector->Map().MinMyGID(),
//
// let's hope this isn't a
// particularly frequent operation
- std::pair<unsigned int, unsigned int>
+ std::pair<size_type, size_type>
local_range = this->local_range ();
- for (unsigned int i=local_range.first; i<local_range.second; ++i)
+ for (size_type i=local_range.first; i<local_range.second; ++i)
(*vector)[0][i-local_range.first] = v(i);
return *this;
TrilinosScalar
- VectorBase::el (const unsigned int index) const
+ VectorBase::el (const size_type index) const
{
// Extract local indices in
// the vector.
- int trilinos_i = vector->Map().LID(static_cast<int>(index));
+ int_type trilinos_i = vector->Map().LID(static_cast<int_type>(index));
TrilinosScalar value = 0.;
// If the element is not
TrilinosScalar
- VectorBase::operator () (const unsigned int index) const
+ VectorBase::operator () (const size_type index) const
{
// Extract local indices in
// the vector.
- int trilinos_i = vector->Map().LID(static_cast<int>(index));
+ int_type trilinos_i = vector->Map().LID(static_cast<int_type>(index));
TrilinosScalar value = 0.;
// If the element is not present
if (local_size() != v.local_size())
return false;
- unsigned int i;
+ size_type i;
for (i=0; i<local_size(); i++)
if ((*(v.vector))[0][i]!=(*vector)[0][i]) return false;
{
Assert (vector->GlobalLength()!=0, ExcEmptyObject());
- for (unsigned int j=0; j<size(); ++j)
+ for (size_type j=0; j<size(); ++j)
{
double t = (*vector)[0][j];
out.setf (std::ios::fixed, std::ios::floatfield);
if (across)
- for (unsigned int i=0; i<size(); ++i)
+ for (size_type i=0; i<size(); ++i)
out << static_cast<double>(val[i]) << ' ';
else
- for (unsigned int i=0; i<size(); ++i)
+ for (size_type i=0; i<size(); ++i)
out << static_cast<double>(val[i]) << std::endl;
out << std::endl;
//one index and the value per local
//entry.
return sizeof(*this)
- + this->local_size()*( sizeof(double)+sizeof(int) );
+ + this->local_size()*( sizeof(double)+sizeof(int_type) );
}
} /* end of namespace TrilinosWrappers */
// vectors. Actually, there should
// be none, if there is no memory
// leak
- unsigned int n=0;
for (typename std::vector<entry_type>::iterator i=data->begin();
i != data->end();
++i)
{
- if (i->first == true)
- ++n;
delete i->second;
}
delete data;
template <typename VECTOR>
inline
void
-GrowingVectorMemory<VECTOR>::Pool::initialize(const unsigned int size)
+GrowingVectorMemory<VECTOR>::Pool::initialize(const size_type size)
{
if (data == 0)
{
template <typename VECTOR>
inline
-GrowingVectorMemory<VECTOR>::GrowingVectorMemory (const unsigned int initial_size,
+GrowingVectorMemory<VECTOR>::GrowingVectorMemory (const size_type initial_size,
const bool log_statistics)
: