BRUNO source/hp/fe_collection.cc
MARKUS source/hp/fe_values.cc
BRUNO source/hp/mapping_collection.cc
-source/lac/block_matrix_array.cc
+BRUNO source/lac/block_matrix_array.cc
MARKUS source/lac/block_sparse_matrix.cc
MARKUS source/lac/block_sparse_matrix_ez.cc
MARKUS source/lac/block_sparsity_pattern.cc
BRUNO include/deal.II/integrators/local_integrators.h
BRUNO include/deal.II/integrators/maxwell.h
BRUNO include/deal.II/integrators/patches.h
-include/deal.II/lac/arpack_solver.h
-include/deal.II/lac/block_indices.h
-include/deal.II/lac/block_list.h
-include/deal.II/lac/block_matrix_array.h
-include/deal.II/lac/block_matrix_base.h
+BRUNO include/deal.II/lac/arpack_solver.h
+BRUNO include/deal.II/lac/block_indices.h
+BRUNO include/deal.II/lac/block_list.h
+BRUNO include/deal.II/lac/block_matrix_array.h
+BRUNO include/deal.II/lac/block_matrix_base.h
include/deal.II/lac/block_matrix.h
MARKUS include/deal.II/lac/block_sparse_matrix_ez.h
MARKUS include/deal.II/lac/block_sparse_matrix_ez.templates.h
static const unsigned int
invalid_unsigned_int = static_cast<unsigned int> (-1);
+ /**
+ * Representation of the
+ * largest number that
+ * can be put into a
+ * size_type. This value
+ * is used throughout
+ * the library as a
+ * marker for an
+ * invalid size_type
+ * value, such as
+ * an invalid array
+ * index, an invalid
+ * array size, and the
+ * like.
+ */
+ static const size_type
+ invalid_size_type = static_cast<size_type> (-1);
+
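A hedged usage sketch of the marker defined above; find_entry and its arguments are hypothetical, the point is only the comparison against numbers::invalid_size_type:

    // Hypothetical lookup reporting "not found" through the marker value.
    const size_type position = find_entry(my_container, my_key);
    if (position == numbers::invalid_size_type)
      {
        // entry absent: 'position' must not be used as an array index
      }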
/**
* An invalid value for indices of degrees
* of freedom.
class ArpackSolver : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
+
/**
* An enum that lists the possible
* choices for which eigenvalues to
tmp.reinit(src);
- for (unsigned int i=0; i<src.size(); ++i)
+ for (size_type i=0; i<src.size(); ++i)
src(i) = *(workd+ipntr[0]-1+i);
// multiplication with mass matrix M
// solving linear system
inverse.vmult(dst,tmp);
- for (unsigned int i=0; i<dst.size(); ++i)
+ for (size_type i=0; i<dst.size(); ++i)
*(workd+ipntr[1]-1+i) = dst(i);
}
break;
tmp.reinit(src);
tmp2.reinit(src);
- for (unsigned int i=0; i<src.size(); ++i)
+ for (size_type i=0; i<src.size(); ++i)
{
src(i) = *(workd+ipntr[2]-1+i);
tmp(i) = *(workd+ipntr[0]-1+i);
// solving linear system
inverse.vmult(dst,src);
- for (unsigned int i=0; i<dst.size(); ++i)
+ for (size_type i=0; i<dst.size(); ++i)
*(workd+ipntr[1]-1+i) = dst(i);
}
break;
src.reinit(eigenvectors[0]);
dst.reinit(src);
- for (unsigned int i=0; i<src.size(); ++i)
+ for (size_type i=0; i<src.size(); ++i)
src(i) = *(workd+ipntr[0]-1+i);
// Multiplication with mass matrix M
mass_matrix.vmult(dst, src);
- for (unsigned int i=0; i<dst.size(); ++i)
+ for (size_type i=0; i<dst.size(); ++i)
*(workd+ipntr[1]-1+i) = dst(i);
}
Assert (false, ExcArpackInfodneupd(info));
}
- for (unsigned int i=0; i<eigenvectors.size(); ++i)
+ for (size_type i=0; i<eigenvectors.size(); ++i)
for (unsigned int j=0; j<n; ++j)
eigenvectors[i](j) = v[i*n+j];
AssertDimension (eigenvalues.size(), eigenvalues_real.size());
AssertDimension (eigenvalues.size(), eigenvalues_im.size());
- for (unsigned int i=0; i<eigenvalues.size(); ++i)
+ for (size_type i=0; i<eigenvalues.size(); ++i)
{
eigenvalues[i].real() = eigenvalues_real[i];
eigenvalues[i].imag() = eigenvalues_im[i];
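The pointer arithmetic in the loops above follows ARPACK's reverse-communication protocol: ipntr stores 1-based Fortran offsets into the work array workd, so the C++ side subtracts one before indexing. A condensed sketch of one exchange, with workd, ipntr, src, dst and inverse as in the solver code above:

    // Read ARPACK's operand from workd (1-based offset in ipntr[0]),
    // apply the user-supplied operator, write the result at ipntr[1].
    for (size_type i=0; i<src.size(); ++i)
      src(i) = *(workd+ipntr[0]-1+i);
    inverse.vmult(dst, src);
    for (size_type i=0; i<dst.size(); ++i)
      *(workd+ipntr[1]-1+i) = dst(i);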
class BlockIndices : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
/**
* Default
* number of blocks will be the
* size of the vector
*/
- BlockIndices (const std::vector<unsigned int> &n);
+ BlockIndices (const std::vector<size_type> &n);
/**
* Specialized constructor for a
* structure with blocks of equal size.
*/
- explicit BlockIndices(const unsigned int n_blocks, const unsigned int block_size = 0);
+ explicit BlockIndices(const size_type n_blocks, const size_type block_size = 0);
/**
* Reinitialize the number of
* blocks and assign each block
* the same number of elements.
*/
- void reinit (const unsigned int n_blocks,
- const unsigned int n_elements_per_block);
+ void reinit (const size_type n_blocks,
+ const size_type n_elements_per_block);
/**
* Reinitialize the number of
* of block @p i is set to
* <tt>n[i]</tt>.
*/
- void reinit (const std::vector<unsigned int> &n);
+ void reinit (const std::vector<size_type> &n);
/**
* Add another block of given
* size to the end of the block
* structure.
*/
- void push_back(const unsigned int size);
+ void push_back(const size_type size);
/**
* @name Size information
/**
* Number of blocks in index field.
*/
- unsigned int size () const;
+ size_type size () const;
/**
* Return the total number of
* of the vector space of the
* block vector.
*/
- unsigned int total_size () const;
+ size_type total_size () const;
/**
* The size of the @p ith block.
*/
- unsigned int block_size (const unsigned int i) const;
+ size_type block_size (const size_type i) const;
//@}
* the block, the second the
* index within it.
*/
- std::pair<unsigned int,unsigned int>
- global_to_local (const unsigned int i) const;
+ std::pair<size_type,size_type>
+ global_to_local (const size_type i) const;
/**
* Return the global index of
* @p index in block @p block.
*/
- unsigned int local_to_global (const unsigned int block,
- const unsigned int index) const;
+ size_type local_to_global (const size_type block,
+ const size_type index) const;
/**
* The start index of the ith block.
*/
- unsigned int block_start (const unsigned int i) const;
+ size_type block_start (const size_type i) const;
//@}
/**
* we cache this value for faster
* access.
*/
- unsigned int n_blocks;
+ size_type n_blocks;
/**
* Global starting index of each
* value is the total number of
* entries.
*/
- std::vector<unsigned int> start_indices;
+ std::vector<size_type> start_indices;
};
LogStream &
operator << (LogStream &s, const BlockIndices &bi)
{
- const unsigned int n = bi.size();
+  const BlockIndices::size_type n = bi.size();
s << n << ":[";
// Write first size without leading space
if (n>0)
s << bi.block_size(0);
// Write all other sizes
- for (unsigned int i=1; i<n; ++i)
+  for (BlockIndices::size_type i=1; i<n; ++i)
s << ' ' << bi.block_size(i);
s << "]->" << bi.total_size();
return s;
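For example (block sizes illustrative), an object with three blocks of four elements each prints as 3:[4 4 4]->12:

    BlockIndices bi(3, 4);       // three blocks, four elements each
    deallog << bi << std::endl;  // writes "3:[4 4 4]->12"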
inline
void
-BlockIndices::reinit (const unsigned int nb,
- const unsigned int block_size)
+BlockIndices::reinit (const size_type nb,
+ const size_type block_size)
{
n_blocks = nb;
start_indices.resize(n_blocks+1);
- for (unsigned int i=0; i<=n_blocks; ++i)
+ for (size_type i=0; i<=n_blocks; ++i)
start_indices[i] = i * block_size;
}
inline
void
-BlockIndices::reinit (const std::vector<unsigned int> &n)
+BlockIndices::reinit (const std::vector<size_type> &n)
{
if (start_indices.size() != n.size()+1)
{
start_indices.resize(n_blocks+1);
}
start_indices[0] = 0;
- for (unsigned int i=1; i<=n_blocks; ++i)
+ for (size_type i=1; i<=n_blocks; ++i)
start_indices[i] = start_indices[i-1] + n[i-1];
}
inline
BlockIndices::BlockIndices (
- const unsigned int n_blocks,
- const unsigned int block_size)
+ const size_type n_blocks,
+ const size_type block_size)
:
n_blocks(n_blocks),
start_indices(n_blocks+1)
{
- for (unsigned int i=0; i<=n_blocks; ++i)
+ for (size_type i=0; i<=n_blocks; ++i)
start_indices[i] = i * block_size;
}
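A short sketch of the two index translations provided by this class, using arbitrary block sizes 3 and 5:

    std::vector<BlockIndices::size_type> sizes(2);
    sizes[0] = 3;
    sizes[1] = 5;
    const BlockIndices bi(sizes);
    // global index 4 lies in block 1 at local index 1 ...
    const std::pair<BlockIndices::size_type, BlockIndices::size_type>
      p = bi.global_to_local(4);
    // ... and local_to_global inverts the map: bi.local_to_global(1,1) == 4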
inline
-BlockIndices::BlockIndices (const std::vector<unsigned int> &n)
+BlockIndices::BlockIndices (const std::vector<size_type> &n)
:
n_blocks(n.size()),
start_indices(n.size()+1)
inline
void
-BlockIndices::push_back(const unsigned int sz)
+BlockIndices::push_back(const size_type sz)
{
start_indices.push_back(start_indices[n_blocks]+sz);
++n_blocks;
inline
-std::pair<unsigned int,unsigned int>
-BlockIndices::global_to_local (const unsigned int i) const
+std::pair<BlockIndices::size_type,BlockIndices::size_type>
+BlockIndices::global_to_local (const size_type i) const
{
Assert (i<total_size(), ExcIndexRange(i, 0, total_size()));
while (i < start_indices[block])
--block;
- return std::pair<unsigned int,unsigned int>(block,
+ return std::pair<size_type,size_type>(block,
i-start_indices[block]);
}
inline
-unsigned int
-BlockIndices::local_to_global (const unsigned int block,
- const unsigned int index) const
+BlockIndices::size_type
+BlockIndices::local_to_global (const size_type block,
+ const size_type index) const
{
Assert (block < n_blocks, ExcIndexRange(block, 0, n_blocks));
Assert (index < start_indices[block+1]-start_indices[block],
inline
-unsigned int
+BlockIndices::size_type
BlockIndices::size () const
{
return n_blocks;
inline
-unsigned int
+BlockIndices::size_type
BlockIndices::total_size () const
{
if (n_blocks == 0) return 0;
inline
-unsigned int
-BlockIndices::block_size (const unsigned int block) const
+BlockIndices::size_type
+BlockIndices::block_size (const size_type block) const
{
Assert (block < n_blocks, ExcIndexRange(block, 0, n_blocks));
return start_indices[block+1]-start_indices[block];
inline
-unsigned int
-BlockIndices::block_start (const unsigned int block) const
+BlockIndices::size_type
+BlockIndices::block_start (const size_type block) const
{
Assert (block < n_blocks, ExcIndexRange(block, 0, n_blocks));
return start_indices[block];
if (n_blocks != b.n_blocks)
return false;
- for (unsigned int i=0; i<=n_blocks; ++i)
+ for (size_type i=0; i<=n_blocks; ++i)
if (start_indices[i] != b.start_indices[i])
return false;
Assert (n_blocks == b.n_blocks,
ExcDimensionMismatch(n_blocks, b.n_blocks));
- for (unsigned int i=0; i<=n_blocks; ++i)
+ for (size_type i=0; i<=n_blocks; ++i)
std::swap (start_indices[i], b.start_indices[i]);
}
public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/// The container for each index set
- typedef std::vector<unsigned int> block_container;
+ typedef std::vector<size_type> block_container;
/// The iterator for individual indices
typedef block_container::const_iterator const_iterator;
* argument is the dimension of
* the vector space.
*/
- void create_sparsity_pattern(SparsityPattern &sparsity, unsigned int n) const;
+ void create_sparsity_pattern(SparsityPattern &sparsity, size_type n) const;
/**
* Add the indices in
* <tt>block</tt>, eliminating
* repeated indices.
*/
- void add(unsigned int block, const std::vector<unsigned int> &indices);
+ void add(size_type block, const std::vector<size_type> &indices);
/**
* Add the indices in
* those indices for which
* <tt>selected_indices</tt> is true.
*/
- void add(unsigned int block,
- const std::vector<unsigned int> &indices,
+ void add(size_type block,
+ const std::vector<size_type> &indices,
const std::vector<bool> &selected_indices,
- unsigned int offset = 0);
+ size_type offset = 0);
/**
* Just set up the correct size
* and assign indices to blocks later.
*/
- void initialize(unsigned int n_blocks);
+ void initialize(size_type n_blocks);
/**
* Set up all index sets using an
* blocks as its first argument.
*/
template <typename ITERATOR>
- void initialize(unsigned int n_blocks,
+ void initialize(size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end);
* blocks as its first argument.
*/
template <typename ITERATOR>
- void initialize_mg(unsigned int n_blocks,
+ void initialize_mg(size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end) DEAL_II_DEPRECATED;
* blocks as its first argument.
*/
template <typename ITERATOR>
- void initialize(unsigned int n_blocks,
+ void initialize(size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end,
const std::vector<bool> &selected_dofs,
- unsigned int offset = 0) DEAL_II_DEPRECATED;
+ size_type offset = 0) DEAL_II_DEPRECATED;
/**
* @deprecated This function will
* move to DoFTools.
* blocks as its first argument.
*/
template <typename ITERATOR>
- void initialize_mg(unsigned int n_blocks,
+ void initialize_mg(size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end,
const std::vector<bool> &selected_dofs,
- unsigned int offset = 0) DEAL_II_DEPRECATED;
+ size_type offset = 0) DEAL_II_DEPRECATED;
/**
* @deprecated This function will
* around a vertex.
*/
template <int dim, typename ITERATOR>
- void initialize_vertex_patches_mg(unsigned int n_blocks,
+ void initialize_vertex_patches_mg(size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end,
const std::vector<bool> &selected_dofs = std::vector<bool>(),
- unsigned int offset = 0) DEAL_II_DEPRECATED;
+ size_type offset = 0) DEAL_II_DEPRECATED;
/**
* @deprecated This function will
/**
* The number of blocks.
*/
- unsigned int size() const;
+ size_type size() const;
/**
* The size of a single block.
*/
- unsigned int block_size(unsigned int block) const;
+ size_type block_size(size_type block) const;
/**
* Iterator to the first index in block.
*/
- const_iterator begin(unsigned int block) const;
+ const_iterator begin(size_type block) const;
/**
* End iterator for a single block.
*/
- const_iterator end(unsigned int block) const;
+ const_iterator end(size_type block) const;
/**
* Return the position of
* <tt>index</tt> in
* <tt>block</tt>, or
- * numbers::invalid_unsigned_int,
+ * numbers::invalid_size_type,
* if the index is not in the block.
*/
- unsigned int local_index(unsigned int block, unsigned int index) const;
+ size_type local_index(size_type block, size_type index) const;
private:
/**
inline
void
-BlockList::create_sparsity_pattern(SparsityPattern &sparsity, unsigned int n) const
+BlockList::create_sparsity_pattern(SparsityPattern &sparsity, size_type n) const
{
- std::vector<unsigned int> sizes(size());
- for (unsigned int b=0; b<size(); ++b)
+ std::vector<size_type> sizes(size());
+ for (size_type b=0; b<size(); ++b)
sizes[b] = block_size(b);
sparsity.reinit(size(), n, sizes);
- for (unsigned int b=0; b<size(); ++b)
+ for (size_type b=0; b<size(); ++b)
{
for (const_iterator i = begin(b); i != end(b); ++i)
sparsity.add(b,*i);
inline
void
-BlockList::add(const unsigned int block, const std::vector<unsigned int> &indices)
+BlockList::add(const size_type block, const std::vector<size_type> &indices)
{
AssertIndexRange(block, index_sets.size());
- for (unsigned int i=0; i<indices.size(); ++i)
+ for (size_type i=0; i<indices.size(); ++i)
{
- const unsigned int k = indices[i];
- if (k==numbers::invalid_unsigned_int)
+ const size_type k = indices[i];
+ if (k==numbers::invalid_size_type)
continue;
if (std::find(index_sets[block].begin(), index_sets[block].end(), k)
== index_sets[block].end())
inline
void
BlockList::add(
- const unsigned int block,
- const std::vector<unsigned int> &indices,
+ const size_type block,
+ const std::vector<size_type> &indices,
const std::vector<bool> &selected,
- unsigned int offset)
+ size_type offset)
{
AssertIndexRange(block, index_sets.size());
AssertDimension(indices.size(), selected.size());
- for (unsigned int i=0; i<indices.size(); ++i)
+ for (size_type i=0; i<indices.size(); ++i)
{
- const unsigned int k = indices[i];
- if (k==numbers::invalid_unsigned_int)
+      const size_type k = indices[i];
+ if (k==numbers::invalid_size_type)
continue;
if (selected[i] && std::find(index_sets[block].begin(), index_sets[block].end(), k-offset)
== index_sets[block].end())
inline
void
-BlockList::initialize(unsigned int n_blocks)
+BlockList::initialize(size_type n_blocks)
{
index_sets.resize(n_blocks);
}
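A hedged sketch of filling a BlockList by hand, with made-up index values; add() skips duplicates as well as entries equal to numbers::invalid_size_type:

    BlockList patches;
    patches.initialize(2);                  // two empty blocks
    std::vector<BlockList::size_type> dofs(3);
    dofs[0] = 0;
    dofs[1] = 4;
    dofs[2] = 7;
    patches.add(0, dofs);                   // block 0 now holds {0, 4, 7}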
template <typename ITERATOR>
inline
void
-BlockList::initialize(unsigned int n_blocks, const ITERATOR begin, const typename identity<ITERATOR>::type end)
+BlockList::initialize(size_type n_blocks, const ITERATOR begin, const typename identity<ITERATOR>::type end)
{
index_sets.resize(n_blocks);
- std::vector<unsigned int> indices;
- unsigned int k = 0;
+ std::vector<size_type> indices;
+ size_type k = 0;
for (ITERATOR cell = begin; cell != end; ++cell, ++k)
{
indices.resize(cell->get_fe().dofs_per_cell);
template <typename ITERATOR>
inline
void
-BlockList::initialize_mg(unsigned int n_blocks, const ITERATOR begin, const typename identity<ITERATOR>::type end)
+BlockList::initialize_mg(size_type n_blocks, const ITERATOR begin, const typename identity<ITERATOR>::type end)
{
index_sets.resize(n_blocks);
- std::vector<unsigned int> indices;
- unsigned int k = 0;
+ std::vector<size_type> indices;
+ size_type k = 0;
for (ITERATOR cell = begin; cell != end; ++cell, ++k)
{
indices.resize(cell->get_fe().dofs_per_cell);
inline
void
BlockList::initialize(
- unsigned int n_blocks,
+ size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end,
const std::vector<bool> &selected_dofs,
- unsigned int offset)
+ size_type offset)
{
index_sets.resize(n_blocks);
- std::vector<unsigned int> indices;
- unsigned int k = 0;
+ std::vector<size_type> indices;
+ size_type k = 0;
for (ITERATOR cell = begin; cell != end; ++cell, ++k)
{
indices.resize(cell->get_fe().dofs_per_cell);
inline
void
BlockList::initialize_mg(
- unsigned int n_blocks,
+ size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end,
const std::vector<bool> &selected_dofs,
- unsigned int offset)
+ size_type offset)
{
index_sets.resize(n_blocks);
- std::vector<unsigned int> indices;
- unsigned int k = 0;
+ std::vector<size_type> indices;
+ size_type k = 0;
for (ITERATOR cell = begin; cell != end; ++cell, ++k)
{
indices.resize(cell->get_fe().dofs_per_cell);
inline
void
BlockList::initialize_vertex_patches_mg(
- unsigned int n_blocks,
+ size_type n_blocks,
const ITERATOR begin,
const typename identity<ITERATOR>::type end,
const std::vector<bool> &selected_dofs,
- unsigned int offset)
+ size_type offset)
{
Assert(selected_dofs.size() == 0, ExcNotImplemented());
Assert(offset==0, ExcNotImplemented());
Assert(fe.dofs_per_vertex == 0, ExcNotImplemented());
index_sets.resize(n_blocks);
- std::vector<unsigned int> indices;
- unsigned int k = 0;
+ std::vector<size_type> indices;
+ size_type k = 0;
for (ITERATOR cell = begin; cell != end; ++cell)
{
if (cell_generates_vertex_patch<dim>(cell, true))
cell->neighbor(4)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,1)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,3)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,4)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,1)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,3)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,4)] = numbers::invalid_dof_index;
}
add(k, indices);
cell->neighbor(4)->neighbor(0)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,0)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,3)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,4)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,0)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,3)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,4)] = numbers::invalid_dof_index;
}
add(k, indices);
cell->neighbor(4)->neighbor(2)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,1)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,2)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,4)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,1)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,2)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,4)] = numbers::invalid_dof_index;
}
add(k, indices);
cell->neighbor(4)->neighbor(2)->neighbor(0)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,0)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,2)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,4)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,0)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,2)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,4)] = numbers::invalid_dof_index;
}
add(k, indices);
case 2:
cell->neighbor(2)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,1)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,2)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,1)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,2)] = numbers::invalid_dof_index;
if (dim>2)
- indices[fe.face_to_cell_index(i,5)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,5)] = numbers::invalid_dof_index;
}
add(k, indices);
cell->neighbor(2)->neighbor(0)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,0)] = numbers::invalid_unsigned_int;
- indices[fe.face_to_cell_index(i,2)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,0)] = numbers::invalid_dof_index;
+ indices[fe.face_to_cell_index(i,2)] = numbers::invalid_dof_index;
if (dim>2)
- indices[fe.face_to_cell_index(i,5)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,5)] = numbers::invalid_dof_index;
}
add(k, indices);
// no break here
cell->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,1)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,1)] = numbers::invalid_dof_index;
if (dim>1)
- indices[fe.face_to_cell_index(i,3)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,3)] = numbers::invalid_dof_index;
if (dim>2)
- indices[fe.face_to_cell_index(i,5)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,5)] = numbers::invalid_dof_index;
}
add(k, indices);
cell->neighbor(0)->get_mg_dof_indices(indices);
for (unsigned int i=0; i<fe.dofs_per_face; ++i)
{
- indices[fe.face_to_cell_index(i,0)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,0)] = numbers::invalid_dof_index;
if (dim>1)
- indices[fe.face_to_cell_index(i,3)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,3)] = numbers::invalid_dof_index;
if (dim>2)
- indices[fe.face_to_cell_index(i,5)] = numbers::invalid_unsigned_int;
+ indices[fe.face_to_cell_index(i,5)] = numbers::invalid_dof_index;
}
add(k, indices);
break;
inline
-unsigned int
+BlockList::size_type
BlockList::size() const
{
return index_sets.size();
inline
-unsigned int
-BlockList::block_size(unsigned int block) const
+BlockList::size_type
+BlockList::block_size(size_type block) const
{
return index_sets[block].size();
}
inline
BlockList::const_iterator
-BlockList::begin(unsigned int block) const
+BlockList::begin(size_type block) const
{
AssertIndexRange(block, index_sets.size());
return index_sets[block].begin();
inline
BlockList::const_iterator
-BlockList::end(unsigned int block) const
+BlockList::end(size_type block) const
{
AssertIndexRange(block, index_sets.size());
return index_sets[block].end();
inline
-unsigned int
-BlockList::local_index(unsigned int block, unsigned int index) const
+BlockList::size_type
+BlockList::local_index(size_type block, size_type index) const
{
AssertIndexRange(block, index_sets.size());
const block_container &b = index_sets[block];
- for (unsigned int i=0; i<b.size(); ++i)
+ for (size_type i=0; i<b.size(); ++i)
if (b[i] == index)
return i;
- return numbers::invalid_unsigned_int;
+ return numbers::invalid_size_type;
}
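Since local_index() performs a linear search, each query costs O(block size), and callers must compare the result against the marker value, e.g. for the patches object sketched earlier:

    const BlockList::size_type pos = patches.local_index(0, 4);
    if (pos != numbers::invalid_size_type)
      {
        // index 4 is the pos-th entry of block 0
      }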
class BlockMatrixArray : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Default constructor creating a
* useless object. initialize()
* Constructor fixing the
* dimensions.
*/
- BlockMatrixArray (const unsigned int n_block_rows,
- const unsigned int n_block_cols);
+ BlockMatrixArray (const size_type n_block_rows,
+ const size_type n_block_cols);
/**
* Initialize object
* created by the default
* constructor.
*/
- void initialize (const unsigned int n_block_rows,
- const unsigned int n_block_cols);
+ void initialize (const size_type n_block_rows,
+ const size_type n_block_cols);
/**
* Constructor fixing the
* @deprecated The last argument is ignored. Use the constructor with only
* the first two arguments.
*/
- BlockMatrixArray (const unsigned int n_block_rows,
- const unsigned int n_block_cols,
+ BlockMatrixArray (const size_type n_block_rows,
+ const size_type n_block_cols,
VectorMemory<Vector<number> > &mem) DEAL_II_DEPRECATED;
/**
* @deprecated The last argument is ignored. Use the function with same name
* but only the first two arguments.
*/
- void initialize (const unsigned int n_block_rows,
- const unsigned int n_block_cols,
+ void initialize (const size_type n_block_rows,
+ const size_type n_block_cols,
VectorMemory<Vector<number> > &mem) DEAL_II_DEPRECATED;
/**
* Adjust the matrix to a new
* size and delete all blocks.
*/
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_cols);
+ void reinit (const size_type n_block_rows,
+ const size_type n_block_cols);
/**
* Add a block matrix entry. The
* the multiplication functions.
*/
template <class MATRIX>
- void enter (const MATRIX &matrix,
- const unsigned int row,
- const unsigned int col,
- const double prefix = 1.,
- const bool transpose = false);
+ void enter (const MATRIX &matrix,
+ const size_type row,
+ const size_type col,
+ const double prefix = 1.,
+ const bool transpose = false);
/**
* Add an entry like with enter,
*/
template <class MATRIX>
void enter_aux (VectorMemory<Vector<number> > &mem,
- const MATRIX &matrix,
- const unsigned int row,
- const unsigned int col,
- const double prefix = 1.,
- const bool transpose = false) DEAL_II_DEPRECATED;
+ const MATRIX &matrix,
+ const size_type row,
+ const size_type col,
+ const double prefix = 1.,
+ const bool transpose = false) DEAL_II_DEPRECATED;
/**
* Number of block-entries per
* column.
*/
- unsigned int n_block_rows () const;
+ size_type n_block_rows () const;
/**
* Number of block-entries per
* row.
*/
- unsigned int n_block_cols () const;
+ size_type n_block_cols () const;
/**
* Matrix-vector multiplication.
*/
template<class MATRIX>
Entry (const MATRIX &matrix,
- unsigned row, unsigned int col,
+ size_type row, size_type col,
double prefix, bool transpose);
/**
* Row number in the block
* matrix.
*/
- unsigned int row;
+ size_type row;
/**
* Column number in the block
* matrix.
*/
- unsigned int col;
+ size_type col;
/**
* Factor in front of the matrix
/**
* Number of blocks per column.
*/
- unsigned int block_rows;
+ size_type block_rows;
/**
* Number of blocks per row.
*/
- unsigned int block_cols;
+ size_type block_cols;
};
/*@}*/
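A hedged usage sketch of enter(): assembling a 2x2 block operator from two existing SparseMatrix objects A and B. Prefix factors scale the blocks and the final flag requests the transpose, so the result represents [A, -B; B^T, 0]:

    BlockMatrixArray<double> op(2, 2);
    op.enter(A, 0, 0);              //  A    in block (0,0)
    op.enter(B, 0, 1, -1.);         // -B    in block (0,1)
    op.enter(B, 1, 0, 1., true);    //  B^T  in block (1,0)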
* <tt>n_blocks</tt> is the
* number of blocks in each direction.
*/
- BlockTrianglePrecondition (const unsigned int n_blocks);
+ BlockTrianglePrecondition (const size_type n_blocks);
/**
* Constructor. This matrix must be
* @deprecated The second argument is ignored. Use the constructor with only
* the first and third argument.
*/
- BlockTrianglePrecondition (const unsigned int n_block_rows,
+ BlockTrianglePrecondition (const size_type n_block_rows,
VectorMemory<Vector<number> > &mem,
const bool backward = false) DEAL_II_DEPRECATED;
* @deprecated The second argument
* is ignored. Use the function without that argument.
*/
- void initialize (const unsigned int n_block_rows,
+ void initialize (const size_type n_block_rows,
VectorMemory<Vector<number> > &mem,
const bool backward = false) DEAL_II_DEPRECATED;
* Resize preconditioner to a new
* size and clear all blocks.
*/
- void reinit (const unsigned int n_block_rows);
+ void reinit (const size_type n_block_rows);
/**
* matrices or preconditioners.
*/
template <class MATRIX>
- void enter (const MATRIX &matrix,
- const unsigned int row,
- const unsigned int col,
- const double prefix = 1.,
- const bool transpose = false);
+ void enter (const MATRIX &matrix,
+ const size_type row,
+ const size_type col,
+ const double prefix = 1.,
+ const bool transpose = false);
/**
* Enter a block. This calls
*/
template <class MATRIX>
void enter_aux (VectorMemory<Vector<double> > &mem,
- const MATRIX &matrix,
- const unsigned int row,
- const unsigned int col,
- const double prefix = 1.,
- const bool transpose = false) DEAL_II_DEPRECATED;
+ const MATRIX &matrix,
+ const size_type row,
+ const size_type col,
+ const double prefix = 1.,
+ const bool transpose = false) DEAL_II_DEPRECATED;
/**
* Preconditioning.
* matrix here.
*/
DeclException1(ExcNoDiagonal,
- unsigned int,
+ size_type,
<< "No diagonal entry was added for block " << arg1);
/**
* matrix here.
*/
DeclException1(ExcMultipleDiagonal,
- unsigned int,
+ size_type,
<< "Inverse diagonal entries may not be added in block "
<< arg1);
//@}
* for one row.
*/
void do_row (BlockVector<number> &dst,
- unsigned int row_num) const;
+ size_type row_num) const;
/**
* Flag for backward insertion.
inline
BlockMatrixArray<number>::Entry::Entry (
const MATRIX &m,
- unsigned int row,
- unsigned int col,
+ size_type row,
+ size_type col,
double prefix,
bool transpose)
:
void
BlockMatrixArray<number>::enter (
const MATRIX &matrix,
- unsigned int row,
- unsigned int col,
+ size_type row,
+ size_type col,
double prefix,
bool transpose)
{
BlockMatrixArray<number>::enter_aux (
VectorMemory<Vector<number> > &mem,
const MATRIX &matrix,
- unsigned int row,
- unsigned int col,
+ size_type row,
+ size_type col,
double prefix,
bool transpose)
{
typename std::vector<Entry>::const_iterator m = entries.begin();
typename std::vector<Entry>::const_iterator end = entries.end();
- unsigned int matrix_number = 0;
+ size_type matrix_number = 0;
for (; m != end ; ++m)
{
if (matrix_names.find(m->matrix) == matrix_names.end())
array(m->row, m->col) += stream.str();
}
- for (unsigned int i=0; i<n_block_rows(); ++i)
- for (unsigned int j=0; j<n_block_cols(); ++j)
+ for (size_type i=0; i<n_block_rows(); ++i)
+ for (size_type j=0; j<n_block_cols(); ++j)
{
out << '\t' << array(i,j);
if (j==n_block_cols()-1)
inline
void
BlockTrianglePrecondition<number>::enter (const MATRIX &matrix,
- unsigned row, unsigned int col,
+ size_type row, size_type col,
double prefix, bool transpose)
{
BlockMatrixArray<number>::enter(matrix, row, col, prefix, transpose);
BlockTrianglePrecondition<number>::enter_aux (
VectorMemory<Vector<double> > &mem,
const MATRIX &matrix,
- unsigned int row,
- unsigned int col,
+ size_type row,
+ size_type col,
double prefix,
bool transpose)
{
* element represented by
* this object.
*/
- unsigned int block_row() const;
+ size_type block_row() const;
/**
* Block column of the
* element represented by
* this object.
*/
- unsigned int block_column() const;
+ size_type block_column() const;
protected:
/**
* Block row into which we presently
* point.
*/
- unsigned int row_block;
+ size_type row_block;
/**
* Block column into which we
* presently point.
*/
- unsigned int col_block;
+ size_type col_block;
/**
* Let the iterator class be a
* of rows in the matrix.
*/
Accessor (BlockMatrix *m,
- const unsigned int row,
- const unsigned int col);
+ const size_type row,
+ const size_type col);
/**
* Row number of the element
* represented by this
* object.
*/
- unsigned int row() const;
+ size_type row() const;
/**
* Column number of the
* element represented by
* this object.
*/
- unsigned int column() const;
+ size_type column() const;
/**
* Value of the entry at the
* of rows in the matrix.
*/
Accessor (const BlockMatrix *m,
- const unsigned int row,
- const unsigned int col);
+ const size_type row,
+ const size_type col);
/**
* Initialize const accessor
* represented by this
* object.
*/
- unsigned int row() const;
+ size_type row() const;
/**
* Column number of the
* element represented by
* this object.
*/
- unsigned int column() const;
+ size_type column() const;
/**
* Value of the entry at the
* given coordinates.
*/
BlockType &
- block (const unsigned int row,
- const unsigned int column);
+ block (const size_type row,
+ const size_type column);
/**
* constant objects.
*/
const BlockType &
- block (const unsigned int row,
- const unsigned int column) const;
+ block (const size_type row,
+ const size_type column) const;
/**
* Return the dimension of the
* matrix is of dimension
* $m \times n$.
*/
- unsigned int m () const;
+ size_type m () const;
/**
* Return the dimension of the
* matrix is of dimension
* $m \times n$.
*/
- unsigned int n () const;
+ size_type n () const;
/**
* sparsity pattern is presently
* associated to this matrix.
*/
- unsigned int n_block_rows () const;
+ size_type n_block_rows () const;
/**
* Return the number of blocks in
* sparsity pattern is presently
* associated to this matrix.
*/
- unsigned int n_block_cols () const;
+ size_type n_block_cols () const;
/**
* Set the element <tt>(i,j)</tt>
* is allowed to store zero
* values in non-existent fields.
*/
- void set (const unsigned int i,
- const unsigned int j,
+ void set (const size_type i,
+ const size_type j,
const value_type value);
/**
* values are treated.
*/
template <typename number>
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = false);
+ void set (const std::vector<size_type> &indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = false);
/**
* Same function as before, but now
* on rows and columns, respectively.
*/
template <typename number>
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = false);
+ void set (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = false);
/**
* Set several elements in the
* values are treated.
*/
template <typename number>
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values = false);
+ void set (const size_type row,
+ const std::vector<size_type> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values = false);
/**
* Set several elements to values
* values are inserted/replaced.
*/
template <typename number>
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values = false);
+ void set (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number *values,
+ const bool elide_zero_values = false);
/**
* Add <tt>value</tt> to the
* is allowed to store zero
* values in non-existent fields.
*/
- void add (const unsigned int i,
- const unsigned int j,
+ void add (const size_type i,
+ const size_type j,
const value_type value);
/**
* into the matrix.
*/
template <typename number>
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = true);
+ void add (const std::vector<size_type> &indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = true);
/**
* Same function as before, but now
* on rows and columns, respectively.
*/
template <typename number>
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = true);
+ void add (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = true);
/**
* Set several elements in the
* into the matrix.
*/
template <typename number>
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values = true);
+ void add (const size_type row,
+ const std::vector<size_type> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values = true);
/**
* Add an array of values given by
* into the matrix.
*/
template <typename number>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
+ void add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
/**
* Add <tt>matrix</tt> scaled by
* if the wanted element does not
* exist in the matrix.
*/
- value_type operator () (const unsigned int i,
- const unsigned int j) const;
+ value_type operator () (const size_type i,
+ const size_type j) const;
/**
* This function is mostly like
* solution, since the sparsity
* of the matrix is not used.
*/
- value_type el (const unsigned int i,
- const unsigned int j) const;
+ value_type el (const size_type i,
+ const size_type j) const;
/**
* Return the main diagonal element in
* involve searching for the
* right column number.
*/
- value_type diag_element (const unsigned int i) const;
+ value_type diag_element (const size_type i) const;
/**
* Call the compress() function on all
* STL-like iterator with the
* first entry of row <tt>r</tt>.
*/
- iterator begin (const unsigned int r);
+ iterator begin (const size_type r);
/**
* Final iterator of row <tt>r</tt>.
*/
- iterator end (const unsigned int r);
+ iterator end (const size_type r);
/**
* STL-like iterator with the
* first entry.
* STL-like iterator with the
* first entry of row <tt>r</tt>.
*/
- const_iterator begin (const unsigned int r) const;
+ const_iterator begin (const size_type r) const;
/**
* Final iterator of row <tt>r</tt>.
*/
- const_iterator end (const unsigned int r) const;
+ const_iterator end (const size_type r) const;
/**
* Return a reference to the underlying
* individual blocks when doing a
* collective add or set.
*/
- std::vector<unsigned int> counter_within_block;
+ std::vector<size_type> counter_within_block;
/**
* Temporary vector for column
* local to global data on each
* sparse matrix.
*/
- std::vector<std::vector<unsigned int> > column_indices;
+ std::vector<std::vector<size_type> > column_indices;
/**
* Temporary vector for storing the
template <class BlockMatrix>
inline
- unsigned int
+  typename AccessorBase<BlockMatrix>::size_type
AccessorBase<BlockMatrix>::block_row() const
{
- Assert (row_block != numbers::invalid_unsigned_int,
+ Assert (row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return row_block;
template <class BlockMatrix>
inline
- unsigned int
+  typename AccessorBase<BlockMatrix>::size_type
AccessorBase<BlockMatrix>::block_column() const
{
- Assert (col_block != numbers::invalid_unsigned_int,
+ Assert (col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return col_block;
inline
Accessor<BlockMatrix, true>::Accessor (
const BlockMatrix *matrix,
- const unsigned int row,
- const unsigned int col)
+ const size_type row,
+ const size_type col)
:
matrix(matrix),
base_iterator(matrix->block(0,0).begin())
// the end of the matrix
if (row < matrix->m())
{
- const std::pair<unsigned int,unsigned int> indices
+ const std::pair<size_type,size_type> indices
= matrix->row_block_indices.global_to_local(row);
// find the first block that does
// have an entry in this row
- for (unsigned int bc=0; bc<matrix->n_block_cols(); ++bc)
+ for (size_type bc=0; bc<matrix->n_block_cols(); ++bc)
{
base_iterator
= matrix->block(indices.first, bc).begin(indices.second);
{
// we were asked to create the end
// iterator for this matrix
- this->row_block = numbers::invalid_unsigned_int;
- this->col_block = numbers::invalid_unsigned_int;
+ this->row_block = numbers::invalid_size_type;
+ this->col_block = numbers::invalid_size_type;
}
}
template <class BlockMatrix>
inline
- unsigned int
+  typename Accessor<BlockMatrix, true>::size_type
Accessor<BlockMatrix, true>::row() const
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+ Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return (matrix->row_block_indices.local_to_global(this->row_block, 0) +
template <class BlockMatrix>
inline
- unsigned int
+  typename Accessor<BlockMatrix, true>::size_type
Accessor<BlockMatrix, true>::column() const
{
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return (matrix->column_block_indices.local_to_global(this->col_block,0) +
typename Accessor<BlockMatrix, true>::value_type
Accessor<BlockMatrix, true>::value () const
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+ Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return base_iterator->value();
void
Accessor<BlockMatrix, true>::advance ()
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+ Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
// Remember current row inside block
- unsigned int local_row = base_iterator->row();
+ size_type local_row = base_iterator->row();
// Advance one element inside the
// current block
++this->row_block;
if (this->row_block == matrix->n_block_rows())
{
- this->row_block = numbers::invalid_unsigned_int;
- this->col_block = numbers::invalid_unsigned_int;
+ this->row_block = numbers::invalid_size_type;
+ this->col_block = numbers::invalid_size_type;
return;
}
}
// have to have the same
// base_iterator representation, but
// valid iterators have to
- return (((this->row_block == numbers::invalid_unsigned_int)
+ return (((this->row_block == numbers::invalid_size_type)
&&
- (this->col_block == numbers::invalid_unsigned_int))
+ (this->col_block == numbers::invalid_size_type))
||
(base_iterator == a.base_iterator));
inline
Accessor<BlockMatrix, false>::Accessor (
BlockMatrix *matrix,
- const unsigned int row,
- const unsigned int col)
+ const size_type row,
+ const size_type col)
:
matrix(matrix),
base_iterator(matrix->block(0,0).begin())
// the end of the matrix
if (row < matrix->m())
{
- const std::pair<unsigned int,unsigned int> indices
+ const std::pair<size_type,size_type> indices
= matrix->row_block_indices.global_to_local(row);
// find the first block that does
// have an entry in this row
- for (unsigned int bc=0; bc<matrix->n_block_cols(); ++bc)
+ for (size_type bc=0; bc<matrix->n_block_cols(); ++bc)
{
base_iterator
= matrix->block(indices.first, bc).begin(indices.second);
{
// we were asked to create the end
// iterator for this matrix
- this->row_block = numbers::invalid_unsigned_int;
- this->col_block = numbers::invalid_unsigned_int;
+ this->row_block = numbers::invalid_size_type;
+ this->col_block = numbers::invalid_size_type;
}
}
template <class BlockMatrix>
inline
- unsigned int
+  typename Accessor<BlockMatrix, false>::size_type
Accessor<BlockMatrix, false>::row() const
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+ Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return (matrix->row_block_indices.local_to_global(this->row_block, 0) +
template <class BlockMatrix>
inline
- unsigned int
+  typename Accessor<BlockMatrix, false>::size_type
Accessor<BlockMatrix, false>::column() const
{
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return (matrix->column_block_indices.local_to_global(this->col_block,0) +
typename Accessor<BlockMatrix, false>::value_type
Accessor<BlockMatrix, false>::value () const
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+ Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
return base_iterator->value();
void
Accessor<BlockMatrix, false>::set_value (typename Accessor<BlockMatrix, false>::value_type newval) const
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+  Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
base_iterator->value() = newval;
void
Accessor<BlockMatrix, false>::advance ()
{
- Assert (this->row_block != numbers::invalid_unsigned_int,
+ Assert (this->row_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
- Assert (this->col_block != numbers::invalid_unsigned_int,
+ Assert (this->col_block != numbers::invalid_size_type,
ExcIteratorPastEnd());
// Remember current row inside block
- unsigned int local_row = base_iterator->row();
+ size_type local_row = base_iterator->row();
// Advance one element inside the
// current block
++this->row_block;
if (this->row_block == matrix->n_block_rows())
{
- this->row_block = numbers::invalid_unsigned_int;
- this->col_block = numbers::invalid_unsigned_int;
+ this->row_block = numbers::invalid_size_type;
+ this->col_block = numbers::invalid_size_type;
return;
}
}
// have to have the same
// base_iterator representation, but
// valid iterators have to
- return (((this->row_block == numbers::invalid_unsigned_int)
+ return (((this->row_block == numbers::invalid_size_type)
&&
- (this->col_block == numbers::invalid_unsigned_int))
+ (this->col_block == numbers::invalid_size_type))
||
(base_iterator == a.base_iterator));
BlockMatrixBase<MatrixType>::
copy_from (const BlockMatrixType &source)
{
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
block(r,c).copy_from (source.block(r,c));
return *this;
MemoryConsumption::memory_consumption(column_indices)+
MemoryConsumption::memory_consumption(column_values);
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
{
MatrixType *p = this->sub_objects[r][c];
mem += MemoryConsumption::memory_consumption(*p);
void
BlockMatrixBase<MatrixType>::clear ()
{
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
{
MatrixType *p = this->sub_objects[r][c];
this->sub_objects[r][c] = 0;
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::BlockType &
-BlockMatrixBase<MatrixType>::block (const unsigned int row,
- const unsigned int column)
+BlockMatrixBase<MatrixType>::block (const size_type row,
+ const size_type column)
{
Assert (row<n_block_rows(),
ExcIndexRange (row, 0, n_block_rows()));
template <class MatrixType>
inline
const typename BlockMatrixBase<MatrixType>::BlockType &
-BlockMatrixBase<MatrixType>::block (const unsigned int row,
- const unsigned int column) const
+BlockMatrixBase<MatrixType>::block (const size_type row,
+ const size_type column) const
{
Assert (row<n_block_rows(),
ExcIndexRange (row, 0, n_block_rows()));
template <class MatrixType>
inline
-unsigned int
+typename BlockMatrixBase<MatrixType>::size_type
BlockMatrixBase<MatrixType>::m () const
{
return row_block_indices.total_size();
template <class MatrixType>
inline
-unsigned int
+typename BlockMatrixBase<MatrixType>::size_type
BlockMatrixBase<MatrixType>::n () const
{
return column_block_indices.total_size();
template <class MatrixType>
inline
-unsigned int
+typename BlockMatrixBase<MatrixType>::size_type
BlockMatrixBase<MatrixType>::n_block_cols () const
{
return column_block_indices.size();
template <class MatrixType>
inline
-unsigned int
+typename BlockMatrixBase<MatrixType>::size_type
BlockMatrixBase<MatrixType>::n_block_rows () const
{
return row_block_indices.size();
template <class MatrixType>
inline
void
-BlockMatrixBase<MatrixType>::set (const unsigned int i,
- const unsigned int j,
+BlockMatrixBase<MatrixType>::set (const size_type i,
+ const size_type j,
const value_type value)
{
prepare_set_operation();
Assert (numbers::is_finite(value), ExcNumberNotFinite());
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = row_block_indices.global_to_local (i),
col_index = column_block_indices.global_to_local (j);
block(row_index.first,col_index.first).set (row_index.second,
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
+BlockMatrixBase<MatrixType>::set (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
const FullMatrix<number> &values,
const bool elide_zero_values)
{
Assert (col_indices.size() == values.n(),
ExcDimensionMismatch(col_indices.size(), values.n()));
- for (unsigned int i=0; i<row_indices.size(); ++i)
+ for (size_type i=0; i<row_indices.size(); ++i)
set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
elide_zero_values);
}
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::set (const std::vector<unsigned int> &indices,
+BlockMatrixBase<MatrixType>::set (const std::vector<size_type> &indices,
const FullMatrix<number> &values,
const bool elide_zero_values)
{
ExcDimensionMismatch(indices.size(), values.m()));
Assert (values.n() == values.m(), ExcNotQuadratic());
- for (unsigned int i=0; i<indices.size(); ++i)
+ for (size_type i=0; i<indices.size(); ++i)
set (indices[i], indices.size(), &indices[0], &values(i,0),
elide_zero_values);
}
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values)
+BlockMatrixBase<MatrixType>::set (const size_type row,
+ const std::vector<size_type> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values)
+BlockMatrixBase<MatrixType>::set (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number *values,
+ const bool elide_zero_values)
{
prepare_set_operation();
// through all of them.
if (column_indices[0].size() < n_cols)
{
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ for (size_type i=0; i<this->n_block_cols(); ++i)
{
column_indices[i].resize(n_cols);
column_values[i].resize(n_cols);
// Reset the number of added elements
// in each block to zero.
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ for (size_type i=0; i<this->n_block_cols(); ++i)
counter_within_block[i] = 0;
// Go through the column indices to
// is stored contiguously (in fact,
// indices will be intermixed when it
// comes from an element matrix).
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
{
double value = values[j];
if (value == 0 && elide_zero_values == true)
continue;
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
col_index = this->column_block_indices.global_to_local(col_indices[j]);
- const unsigned int local_index = counter_within_block[col_index.first]++;
+ const size_type local_index = counter_within_block[col_index.first]++;
column_indices[col_index.first][local_index] = col_index.second;
column_values[col_index.first][local_index] = value;
#ifdef DEBUG
// If in debug mode, do a check whether
// the right length has been obtained.
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ size_type length = 0;
+ for (size_type i=0; i<this->n_block_cols(); ++i)
length += counter_within_block[i];
Assert (length <= n_cols, ExcInternalError());
#endif
// where we should start reading out
// data. Now let's write the data into
// the individual blocks!
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = this->row_block_indices.global_to_local (row);
- for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+ for (size_type block_col=0; block_col<n_block_cols(); ++block_col)
{
if (counter_within_block[block_col] == 0)
continue;
template <class MatrixType>
inline
void
-BlockMatrixBase<MatrixType>::add (const unsigned int i,
- const unsigned int j,
+BlockMatrixBase<MatrixType>::add (const size_type i,
+ const size_type j,
const value_type value)
{
(value == value_type()))
return;
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = row_block_indices.global_to_local (i),
col_index = column_block_indices.global_to_local (j);
block(row_index.first,col_index.first).add (row_index.second,
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &values,
- const bool elide_zero_values)
+BlockMatrixBase<MatrixType>::add (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
+ const FullMatrix<number> &values,
+ const bool elide_zero_values)
{
Assert (row_indices.size() == values.m(),
ExcDimensionMismatch(row_indices.size(), values.m()));
Assert (col_indices.size() == values.n(),
ExcDimensionMismatch(col_indices.size(), values.n()));
- for (unsigned int i=0; i<row_indices.size(); ++i)
+ for (size_type i=0; i<row_indices.size(); ++i)
add (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
elide_zero_values);
}
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::add (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &values,
- const bool elide_zero_values)
+BlockMatrixBase<MatrixType>::add (const std::vector<size_type> &indices,
+ const FullMatrix<number> &values,
+ const bool elide_zero_values)
{
Assert (indices.size() == values.m(),
ExcDimensionMismatch(indices.size(), values.m()));
Assert (values.n() == values.m(), ExcNotQuadratic());
- for (unsigned int i=0; i<indices.size(); ++i)
+ for (size_type i=0; i<indices.size(); ++i)
add (indices[i], indices.size(), &indices[0], &values(i,0),
elide_zero_values);
}
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values)
+BlockMatrixBase<MatrixType>::add (const size_type row,
+ const std::vector<size_type> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
template <typename number>
inline
void
-BlockMatrixBase<MatrixType>::add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values,
- const bool col_indices_are_sorted)
+BlockMatrixBase<MatrixType>::add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number *values,
+ const bool elide_zero_values,
+ const bool col_indices_are_sorted)
{
prepare_add_operation();
#ifdef DEBUG
// check whether indices really are
// sorted.
- unsigned int before = col_indices[0];
- for (unsigned int i=1; i<n_cols; ++i)
+ size_type before = col_indices[0];
+ for (size_type i=1; i<n_cols; ++i)
if (col_indices[i] <= before)
Assert (false, ExcMessage ("Flag col_indices_are_sorted is set, but "
"indices appear to not be sorted."))
else
before = col_indices[i];
#endif
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = this->row_block_indices.global_to_local (row);
if (this->n_block_cols() > 1)
{
- const unsigned int *first_block = Utilities::lower_bound (col_indices,
- col_indices+n_cols,
- this->column_block_indices.block_start(1));
+ const size_type *first_block = Utilities::lower_bound (col_indices,
+ col_indices+n_cols,
+ this->column_block_indices.block_start(1));
- const unsigned int n_zero_block_indices = first_block - col_indices;
+ const size_type n_zero_block_indices = first_block - col_indices;
block(row_index.first, 0).add (row_index.second,
n_zero_block_indices,
col_indices,
// through all of them.
if (column_indices[0].size() < n_cols)
{
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ for (size_type i=0; i<this->n_block_cols(); ++i)
{
column_indices[i].resize(n_cols);
column_values[i].resize(n_cols);
// Reset the number of added elements
// in each block to zero.
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ for (size_type i=0; i<this->n_block_cols(); ++i)
counter_within_block[i] = 0;
// Go through the column indices to
// block is stored contiguously (in
// fact, data will be intermixed when
// it comes from an element matrix).
- for (unsigned int j=0; j<n_cols; ++j)
+ for (size_type j=0; j<n_cols; ++j)
{
double value = values[j];
if (value == 0 && elide_zero_values == true)
continue;
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
col_index = this->column_block_indices.global_to_local(col_indices[j]);
- const unsigned int local_index = counter_within_block[col_index.first]++;
+ const size_type local_index = counter_within_block[col_index.first]++;
column_indices[col_index.first][local_index] = col_index.second;
column_values[col_index.first][local_index] = value;
#ifdef DEBUG
// If in debug mode, do a check whether
// the right length has been obtained.
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ size_type length = 0;
+ for (size_type i=0; i<this->n_block_cols(); ++i)
length += counter_within_block[i];
Assert (length <= n_cols, ExcInternalError());
#endif
// where we should start reading out
// data. Now let's write the data into
// the individual blocks!
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = this->row_block_indices.global_to_local (row);
- for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+ for (size_type block_col=0; block_col<n_block_cols(); ++block_col)
{
if (counter_within_block[block_col] == 0)
continue;
(factor == 0))
return;
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
// This function should throw if the sparsity
// patterns of the two blocks differ
block(row, col).add(factor, matrix.block(row,col));
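// Example: blockwise scaled addition as performed above, assuming two block
// matrices A and B (hypothetical names) built on the same block sparsity
// pattern:
//
//   A.add (2.0, B);   // A += 2*B, forwarded to block(row,col).add(...)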
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::value_type
-BlockMatrixBase<MatrixType>::operator () (const unsigned int i,
- const unsigned int j) const
+BlockMatrixBase<MatrixType>::operator () (const size_type i,
+ const size_type j) const
{
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = row_block_indices.global_to_local (i),
col_index = column_block_indices.global_to_local (j);
return block(row_index.first,col_index.first) (row_index.second,
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::value_type
-BlockMatrixBase<MatrixType>::el (const unsigned int i,
- const unsigned int j) const
+BlockMatrixBase<MatrixType>::el (const size_type i,
+ const size_type j) const
{
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = row_block_indices.global_to_local (i),
col_index = column_block_indices.global_to_local (j);
return block(row_index.first,col_index.first).el (row_index.second,
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::value_type
-BlockMatrixBase<MatrixType>::diag_element (const unsigned int i) const
+BlockMatrixBase<MatrixType>::diag_element (const size_type i) const
{
Assert (n_block_rows() == n_block_cols(),
ExcNotQuadratic());
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
index = row_block_indices.global_to_local (i);
return block(index.first,index.first).diag_element(index.second);
}
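// Example: the global-to-local splitting used by operator(), el() and
// diag_element() above, restated with plain std types (assumes <vector> and
// <utility>; a sketch of the logic only, not the BlockIndices implementation).
// For block starts {0, 4, 10}, global index 7 lands in block 1, local index 3.
//
//   std::pair<std::size_t, std::size_t>
//   global_to_local (const std::vector<std::size_t> &block_start,
//                    const std::size_t               i)
//   {
//     std::size_t b = block_start.size() - 1;
//     while (block_start[b] > i)          // walk back to the block holding i
//       --b;
//     return std::make_pair (b, i - block_start[b]);
//   }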
void
BlockMatrixBase<MatrixType>::compress (::dealii::VectorOperation::values operation)
{
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
block(r,c).compress (operation);
}
Assert (n_block_cols() != 0, ExcNotInitialized());
Assert (n_block_rows() != 0, ExcNotInitialized());
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
block(r,c) *= factor;
return *this;
const value_type factor_inv = 1. / factor;
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
block(r,c) *= factor_inv;
return *this;
Assert (src.n_blocks() == n_block_cols(),
ExcDimensionMismatch(src.n_blocks(), n_block_cols()));
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
{
block(row,0).vmult (dst.block(row),
src.block(0));
- for (unsigned int col=1; col<n_block_cols(); ++col)
+ for (size_type col=1; col<n_block_cols(); ++col)
block(row,col).vmult_add (dst.block(row),
src.block(col));
};
ExcDimensionMismatch(src.n_blocks(), n_block_cols()));
block(0,0).vmult (dst, src.block(0));
- for (unsigned int col=1; col<n_block_cols(); ++col)
+ for (size_type col=1; col<n_block_cols(); ++col)
block(0,col).vmult_add (dst, src.block(col));
}
Assert (1 == n_block_cols(),
ExcDimensionMismatch(1, n_block_cols()));
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
block(row,0).vmult (dst.block(row),
src);
}
Assert (src.n_blocks() == n_block_cols(),
ExcDimensionMismatch(src.n_blocks(), n_block_cols()));
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
{
block(row,0).vmult_add (dst.block(row),
src.block(0));
- for (unsigned int col=1; col<n_block_cols(); ++col)
+ for (size_type col=1; col<n_block_cols(); ++col)
block(row,col).vmult_add (dst.block(row),
src.block(col));
};
dst = 0.;
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
{
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row,col).Tvmult_add (dst.block(col),
src.block(row));
};
dst = 0.;
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(0,col).Tvmult_add (dst.block(col), src);
}
block(0,0).Tvmult (dst, src.block(0));
- for (unsigned int row=1; row<n_block_rows(); ++row)
+ for (size_type row=1; row<n_block_rows(); ++row)
block(row,0).Tvmult_add (dst, src.block(row));
}
Assert (src.n_blocks() == n_block_rows(),
ExcDimensionMismatch(src.n_blocks(), n_block_rows()));
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
{
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row,col).Tvmult_add (dst.block(col),
src.block(row));
};
ExcDimensionMismatch(v.n_blocks(), n_block_rows()));
value_type norm_sqr = 0;
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
if (row==col)
norm_sqr += block(row,col).matrix_norm_square (v.block(row));
else
ExcDimensionMismatch(v.n_blocks(), n_block_cols()));
value_type result = 0;
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
result += block(row,col).matrix_scalar_product (u.block(row),
v.block(col));
return result;
// perform a sign change of the
// first two term before, and after
// adding up
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
{
block(row,0).residual (dst.block(row),
x.block(0),
b.block(row));
- for (unsigned int i=0; i<dst.block(row).size(); ++i)
+ for (size_type i=0; i<dst.block(row).size(); ++i)
dst.block(row)(i) = -dst.block(row)(i);
- for (unsigned int col=1; col<n_block_cols(); ++col)
+ for (size_type col=1; col<n_block_cols(); ++col)
block(row,col).vmult_add (dst.block(row),
x.block(col));
- for (unsigned int i=0; i<dst.block(row).size(); ++i)
+ for (size_type i=0; i<dst.block(row).size(); ++i)
dst.block(row)(i) = -dst.block(row)(i);
};
value_type res = 0;
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
res += dst.block(row).norm_sqr ();
return std::sqrt(res);
}
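// Worked example for the two sign flips above: with one block row and two
// block columns,
//   residual():   dst = b - A_0 x_0
//   first flip:   dst = A_0 x_0 - b
//   vmult_add():  dst = A_0 x_0 + A_1 x_1 - b
//   second flip:  dst = b - (A_0 x_0 + A_1 x_1)
// i.e. the true block residual, assembled from residual() and vmult_add()
// without any temporary vector.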
BlockMatrixBase<MatrixType>::print (std::ostream &out,
const bool alternative_output) const
{
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
{
if (!alternative_output)
out << "Block (" << row << ", " << col << ")" << std::endl;
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::const_iterator
-BlockMatrixBase<MatrixType>::begin (const unsigned int r) const
+BlockMatrixBase<MatrixType>::begin (const size_type r) const
{
Assert (r<m(), ExcIndexRange(r,0,m()));
return const_iterator(this, r);
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::const_iterator
-BlockMatrixBase<MatrixType>::end (const unsigned int r) const
+BlockMatrixBase<MatrixType>::end (const size_type r) const
{
Assert (r<m(), ExcIndexRange(r,0,m()));
return const_iterator(this, r+1);
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::iterator
-BlockMatrixBase<MatrixType>::begin (const unsigned int r)
+BlockMatrixBase<MatrixType>::begin (const size_type r)
{
Assert (r<m(), ExcIndexRange(r,0,m()));
return iterator(this, r);
template <class MatrixType>
inline
typename BlockMatrixBase<MatrixType>::iterator
-BlockMatrixBase<MatrixType>::end (const unsigned int r)
+BlockMatrixBase<MatrixType>::end (const size_type r)
{
Assert (r<m(), ExcIndexRange(r,0,m()));
return iterator(this, r+1);
void
BlockMatrixBase<MatrixType>::collect_sizes ()
{
- std::vector<unsigned int> row_sizes (this->n_block_rows());
- std::vector<unsigned int> col_sizes (this->n_block_cols());
+ std::vector<size_type> row_sizes (this->n_block_rows());
+ std::vector<size_type> col_sizes (this->n_block_cols());
// first find out the row sizes
// from the first block column
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
row_sizes[r] = sub_objects[r][0]->m();
// then check that the following
// block columns have the same
// sizes
- for (unsigned int c=1; c<this->n_block_cols(); ++c)
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=1; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
Assert (row_sizes[r] == sub_objects[r][c]->m(),
ExcIncompatibleRowNumbers (r,0,r,c));
// then do the same with the columns
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
col_sizes[c] = sub_objects[0][c]->n();
- for (unsigned int r=1; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=1; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
Assert (col_sizes[c] == sub_objects[r][c]->n(),
ExcIncompatibleRowNumbers (0,c,r,c));
void
BlockMatrixBase<MatrixType>::prepare_add_operation ()
{
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row, col).prepare_add();
}
void
BlockMatrixBase<MatrixType>::prepare_set_operation ()
{
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row, col).prepare_set();
}
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix<number> >
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Typedef the base class for simpler
* access to its own typedefs.
* Return the number of entries
* in a specific row.
*/
- unsigned int get_row_length (const types::global_dof_index row) const;
+ size_type get_row_length (const size_type row) const;
/**
* Return the number of nonzero
* the entries should happen to
* be zero, it is counted anyway.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Return the number of actually
* (with absolute value larger than
* threshold) of all the blocks.
*/
- unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const;
+ size_type n_actually_nonzero_elements (const double threshold = 0.0) const;
/**
* Return a (constant) reference
{
Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
this->block(r,c) = d;
return *this;
// do a diagonal preconditioning. uses only
// the diagonal blocks of the matrix
- for (unsigned int i=0; i<this->n_block_rows(); ++i)
+ for (size_type i=0; i<this->n_block_rows(); ++i)
this->block(i,i).precondition_Jacobi (dst.block(i),
src.block(i),
omega);
// anything except checking
// whether the base objects want to
// do something
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
this->block(r,c) = m.block(r,c);
return *this;
sparsity.n_block_cols());
// and reinitialize the blocks
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
{
BlockType *p = new SparseMatrix<number>();
p->reinit (sparsity.block(r,c));
bool
BlockSparseMatrix<number>::empty () const
{
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
if (this->block(r,c).empty () == false)
return false;
template <typename number>
-unsigned int
-BlockSparseMatrix<number>::get_row_length (const types::global_dof_index row) const
+typename BlockSparseMatrix<number>::size_type
+BlockSparseMatrix<number>::get_row_length (const size_type row) const
{
return sparsity_pattern->row_length(row);
}
template <typename number>
-unsigned int
+typename BlockSparseMatrix<number>::size_type
BlockSparseMatrix<number>::n_nonzero_elements () const
{
return sparsity_pattern->n_nonzero_elements ();
template <typename number>
-unsigned int
+typename BlockSparseMatrix<number>::size_type
BlockSparseMatrix<number>::n_actually_nonzero_elements (const double threshold) const
{
- unsigned int count = 0;
- for (unsigned int i=0; i<this->n_block_rows(); ++i)
- for (unsigned int j=0; j<this->n_block_cols(); ++j)
+ size_type count = 0;
+ for (size_type i=0; i<this->n_block_rows(); ++i)
+ for (size_type j=0; j<this->n_block_cols(); ++j)
count += this->sub_objects[i][j]->n_actually_nonzero_elements (threshold);
return count;
const char *zero_string,
const double denominator) const
{
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
{
out << "Component (" << r << "," << c << ")" << std::endl;
this->block(r,c).print_formatted (out, precision, scientific,
{
std::size_t mem = sizeof(*this);
mem += MemoryConsumption::memory_consumption (this->sub_objects);
- for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ for (size_type r=0; r<this->n_block_rows(); ++r)
+ for (size_type c=0; c<this->n_block_cols(); ++c)
mem += MemoryConsumption::memory_consumption(*this->sub_objects[r][c]);
return mem;
class BlockSparseMatrixEZ : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Default constructor. The
* result is an empty object with
* blocks themselves still have
* zero dimension.
*/
- BlockSparseMatrixEZ (const unsigned int block_rows,
- const unsigned int block_cols);
+ BlockSparseMatrixEZ (const size_type block_rows,
+ const size_type block_cols);
/**
* Copy constructor. This is
* called to update internal data
* structures.
*/
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_cols);
+ void reinit (const size_type n_block_rows,
+ const size_type n_block_cols);
/**
* This function collects the
* sizes of the sub-objects and
* given coordinates.
*/
SparseMatrixEZ<Number> &
- block (const unsigned int row,
- const unsigned int column);
+ block (const size_type row,
+ const size_type column);
/**
* constant objects.
*/
const SparseMatrixEZ<Number> &
- block (const unsigned int row,
- const unsigned int column) const;
+ block (const size_type row,
+ const size_type column) const;
/**
* Return the number of blocks in a
* column.
*/
- unsigned int n_block_rows () const;
+ size_type n_block_rows () const;
/**
* Return the number of blocks in a
* row.
*/
- unsigned int n_block_cols () const;
+ size_type n_block_cols () const;
/**
* Return whether the object is
* space. It is the sum of rows
* of the rows of sub-matrices.
*/
- types::global_dof_index n_rows () const;
+ size_type n_rows () const;
/**
* Return number of columns of
* columns of the columns of
* sub-matrices.
*/
- types::global_dof_index n_cols () const;
+ size_type n_cols () const;
/**
* Return the dimension of the
* matrix is of dimension
* $m \times n$.
*/
- types::global_dof_index m () const;
+ size_type m () const;
/**
* Return the dimension of the
* matrix is of dimension
* $m \times n$.
*/
- types::global_dof_index n () const;
+ size_type n () const;
/**
* Set the element <tt>(i,j)</tt>
* allowed to store zero values
* in non-existent fields.
*/
- void set (const types::global_dof_index i,
- const types::global_dof_index j,
+ void set (const size_type i,
+ const size_type j,
const Number value);
/**
* is allowed to store zero
* values in non-existent fields.
*/
- void add (const types::global_dof_index i, const types::global_dof_index j,
+ void add (const size_type i, const size_type j,
const Number value);
template <typename Number>
inline
-unsigned int
+typename BlockSparseMatrixEZ<Number>::size_type
BlockSparseMatrixEZ<Number>::n_block_rows () const
{
return row_indices.size();
template <typename Number>
inline
-unsigned int
+typename BlockSparseMatrixEZ<Number>::size_type
BlockSparseMatrixEZ<Number>::n_rows () const
{
return row_indices.total_size();
template <typename Number>
inline
-unsigned int
+typename BlockSparseMatrixEZ<Number>::size_type
BlockSparseMatrixEZ<Number>::n_block_cols () const
{
return column_indices.size();
template <typename Number>
inline
-unsigned int
+typename BlockSparseMatrixEZ<Number>::size_type
BlockSparseMatrixEZ<Number>::n_cols () const
{
return column_indices.total_size();
template <typename Number>
inline
SparseMatrixEZ<Number> &
-BlockSparseMatrixEZ<Number>::block (const unsigned int row,
- const unsigned int column)
+BlockSparseMatrixEZ<Number>::block (const size_type row,
+ const size_type column)
{
Assert (row<n_block_rows(), ExcIndexRange (row, 0, n_block_rows()));
Assert (column<n_block_cols(), ExcIndexRange (column, 0, n_block_cols()));
template <typename Number>
inline
const SparseMatrixEZ<Number> &
-BlockSparseMatrixEZ<Number>::block (const unsigned int row,
- const unsigned int column) const
+BlockSparseMatrixEZ<Number>::block (const size_type row,
+ const size_type column) const
{
Assert (row<n_block_rows(), ExcIndexRange (row, 0, n_block_rows()));
Assert (column<n_block_cols(), ExcIndexRange (column, 0, n_block_cols()));
template <typename Number>
inline
-types::global_dof_index
+typename BlockSparseMatrixEZ<Number>::size_type
BlockSparseMatrixEZ<Number>::m () const
{
return n_rows();
template <typename Number>
inline
-types::global_dof_index
+typename BlockSparseMatrixEZ<Number>::size_type
BlockSparseMatrixEZ<Number>::n () const
{
return n_cols();
template <typename Number>
inline
void
-BlockSparseMatrixEZ<Number>::set (const types::global_dof_index i,
- const types::global_dof_index j,
+BlockSparseMatrixEZ<Number>::set (const size_type i,
+ const size_type j,
const Number value)
{
Assert (numbers::is_finite(value), ExcNumberNotFinite());
- const std::pair<unsigned int,types::global_dof_index>
+ const std::pair<size_type,size_type>
row_index = row_indices.global_to_local (i),
col_index = column_indices.global_to_local (j);
block(row_index.first,col_index.first).set (row_index.second,
template <typename Number>
inline
void
-BlockSparseMatrixEZ<Number>::add (const types::global_dof_index i,
- const types::global_dof_index j,
+BlockSparseMatrixEZ<Number>::add (const size_type i,
+ const size_type j,
const Number value)
{
Assert (numbers::is_finite(value), ExcNumberNotFinite());
- const std::pair<unsigned int,types::global_dof_index>
+ const std::pair<size_type,size_type>
row_index = row_indices.global_to_local (i),
col_index = column_indices.global_to_local (j);
block(row_index.first,col_index.first).add (row_index.second,
dst = 0.;
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row,col).vmult_add (dst.block(row),
src.block(col));
}
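// Example: the accumulation pattern of vmult() above, written out for dense
// blocks stored as nested std::vectors (assumes <vector>, <algorithm> and
// <cstddef>; a sketch, not the SparseMatrixEZ data structure). dst is zeroed
// once, then every block (r,c) adds A_rc * x_c into dst_r, mirroring the
// per-block vmult_add() calls.
//
//   typedef std::vector<std::vector<double> > DenseBlock;  // block[i][j]
//
//   void block_vmult (const std::vector<std::vector<DenseBlock> > &A,
//                     const std::vector<std::vector<double> >     &x,
//                     std::vector<std::vector<double> >           &dst)
//   {
//     for (std::size_t r=0; r<dst.size(); ++r)
//       std::fill (dst[r].begin(), dst[r].end(), 0.);      // dst = 0
//     for (std::size_t r=0; r<A.size(); ++r)
//       for (std::size_t c=0; c<A[r].size(); ++c)
//         for (std::size_t i=0; i<A[r][c].size(); ++i)
//           for (std::size_t j=0; j<A[r][c][i].size(); ++j)
//             dst[r][i] += A[r][c][i][j] * x[c][j];
//   }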
Assert (src.n_blocks() == n_block_cols(),
ExcDimensionMismatch(src.n_blocks(), n_block_cols()));
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row,col).vmult_add (dst.block(row),
src.block(col));
}
dst = 0.;
- for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type row=0; row<n_block_rows(); ++row)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row,col).Tvmult_add (dst.block(col),
src.block(row));
}
Assert (src.n_blocks() == n_block_rows(),
ExcDimensionMismatch(src.n_blocks(), n_block_rows()));
- for (unsigned int row=0; row<n_block_rows(); ++row)
+ for (size_type row=0; row<n_block_rows(); ++row)
{
- for (unsigned int col=0; col<n_block_cols(); ++col)
+ for (size_type col=0; col<n_block_cols(); ++col)
block(row,col).Tvmult_add (dst.block(col),
src.block(row));
};
void
BlockSparseMatrixEZ<number>::print_statistics (STREAM &out, bool full)
{
- unsigned int used_total = 0;
- unsigned int allocated_total = 0;
- unsigned int reserved_total = 0;
- std::vector<unsigned int> used_by_line_total;
-
- unsigned int used;
- unsigned int allocated;
- unsigned int reserved;
- std::vector<unsigned int> used_by_line;
-
- for (unsigned int i=0; i<n_block_rows(); ++i)
- for (unsigned int j=0; j<n_block_cols(); ++j)
+ size_type used_total = 0;
+ size_type allocated_total = 0;
+ size_type reserved_total = 0;
+ std::vector<size_type> used_by_line_total;
+
+ size_type used;
+ size_type allocated;
+ size_type reserved;
+ std::vector<size_type> used_by_line;
+
+ for (size_type i=0; i<n_block_rows(); ++i)
+ for (size_type j=0; j<n_block_cols(); ++j)
{
used_by_line.clear();
out << "block:\t" << i << '\t' << j << std::endl;
if (full)
{
used_by_line_total.resize(used_by_line.size());
- for (unsigned int i=0; i< used_by_line.size(); ++i)
+ for (size_type i=0; i<used_by_line.size(); ++i)
if (used_by_line[i] != 0)
{
out << "row-entries\t" << i
<< "used:" << used_total << std::endl
<< "allocated:" << allocated_total << std::endl
<< "reserved:" << reserved_total << std::endl;
- for (unsigned int i=0; i< used_by_line_total.size(); ++i)
+ for (size_type i=0; i<used_by_line_total.size(); ++i)
if (used_by_line_total[i] != 0)
{
out << "row-entries\t" << i
template <typename number>
BlockSparseMatrixEZ<number>::
-BlockSparseMatrixEZ (const unsigned int rows,
- const unsigned int cols)
+BlockSparseMatrixEZ (const size_type rows,
+ const size_type cols)
:
row_indices (rows, 0),
column_indices (cols, 0)
// anything except checking
// whether the base objects want to
// do something
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
block(r,c) = m.block(r,c);
return *this;
}
{
Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
block(r,c) = 0;
return *this;
template <typename number>
void
-BlockSparseMatrixEZ<number>::reinit (const unsigned int rows,
- const unsigned int cols)
+BlockSparseMatrixEZ<number>::reinit (const size_type rows,
+ const size_type cols)
{
row_indices.reinit(rows, 0);
column_indices.reinit(cols, 0);
bool
BlockSparseMatrixEZ<number>::empty () const
{
- for (unsigned int r=0; r<n_block_rows(); ++r)
- for (unsigned int c=0; c<n_block_cols(); ++c)
+ for (size_type r=0; r<n_block_rows(); ++r)
+ for (size_type c=0; c<n_block_cols(); ++c)
if (block(r,c).empty () == false)
return false;
return true;
void
BlockSparseMatrixEZ<number>::collect_sizes ()
{
- const unsigned int rows = n_block_rows();
- const unsigned int columns = n_block_cols();
- std::vector<types::global_dof_index> row_sizes (rows);
- std::vector<types::global_dof_index> col_sizes (columns);
+ const size_type rows = n_block_rows();
+ const size_type columns = n_block_cols();
+ std::vector<size_type> row_sizes (rows);
+ std::vector<size_type> col_sizes (columns);
// first find out the row sizes
// from the first block column
- for (unsigned int r=0; r<rows; ++r)
+ for (size_type r=0; r<rows; ++r)
row_sizes[r] = blocks[r][0].m();
// then check that the following
// block columns have the same
// sizes
- for (unsigned int c=1; c<columns; ++c)
- for (unsigned int r=0; r<rows; ++r)
+ for (size_type c=1; c<columns; ++c)
+ for (size_type r=0; r<rows; ++r)
Assert (row_sizes[r] == blocks[r][c].m(),
ExcDimensionMismatch (row_sizes[r], blocks[r][c].m()));
// then do the same with the columns
- for (unsigned int c=0; c<columns; ++c)
+ for (size_type c=0; c<columns; ++c)
col_sizes[c] = blocks[0][c].n();
- for (unsigned int r=1; r<rows; ++r)
- for (unsigned int c=0; c<columns; ++c)
+ for (size_type r=1; r<rows; ++r)
+ for (size_type c=0; c<columns; ++c)
Assert (col_sizes[c] == blocks[r][c].n(),
ExcDimensionMismatch (col_sizes[c], blocks[r][c].n()));
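// Worked example for the consistency checks above: a 2x2 block arrangement
// with blocks of sizes (3x5  3x2 / 4x5  4x2) passes, since each block row
// shares one m() and each block column shares one n(); replacing the top-left
// block by a 2x5 matrix would trigger the first Assert.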
class BlockSparsityPatternBase : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Define a value which is used
* to indicate that a certain
* the respective value of the
* SparsityPattern class.
*/
- static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
+ static const size_type invalid_entry = SparsityPattern::invalid_entry;
/**
* Initialize the matrix empty,
* to call collect_sizes() after
* you assign them sizes.
*/
- BlockSparsityPatternBase (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
+ BlockSparsityPatternBase (const size_type n_block_rows,
+ const size_type n_block_columns);
/**
* Copy constructor. This
* user call whatever function
* she desires.
*/
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
+ void reinit (const size_type n_block_rows,
+ const size_type n_block_columns);
/**
* Copy operator. For this the
* given coordinates.
*/
SparsityPatternBase &
- block (const unsigned int row,
- const unsigned int column);
+ block (const size_type row,
+ const size_type column);
/**
* constant objects.
*/
const SparsityPatternBase &
- block (const unsigned int row,
- const unsigned int column) const;
+ block (const size_type row,
+ const size_type column) const;
/**
* Grant access to the object
* Return the number of blocks in a
* column.
*/
- unsigned int n_block_rows () const;
+ size_type n_block_rows () const;
/**
* Return the number of blocks in a
* row.
*/
- unsigned int n_block_cols () const;
+ size_type n_block_cols () const;
/**
* Return whether the object is
* blocks in a row, and the
* maximum over all rows.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
* Add a nonzero entry to the matrix.
* to which block <tt>(i,j)</tt> belongs
* and then relays to that block.
*/
- void add (const types::global_dof_index i, const types::global_dof_index j);
+ void add (const size_type i, const size_type j);
/**
* Add several nonzero entries to the
* blocks.
*/
template <typename ForwardIterator>
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
+ void add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
/**
* Return number of rows of this
* of the (block-)rows of
* sub-matrices.
*/
- types::global_dof_index n_rows () const;
+ size_type n_rows () const;
/**
* Return number of columns of
* columns of the (block-)columns
* of sub-matrices.
*/
- types::global_dof_index n_cols () const;
+ size_type n_cols () const;
/**
* Check if a value at a certain
* position may be non-zero.
*/
- bool exists (const types::global_dof_index i, const types::global_dof_index j) const;
+ bool exists (const size_type i, const size_type j) const;
/**
* Number of entries in a
* all the blocks that form this
* row.
*/
- unsigned int row_length (const types::global_dof_index row) const;
+ size_type row_length (const size_type row) const;
/**
* Return the number of nonzero
* the sum of the values as
* returned by the sub-objects.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Print the sparsity of the
/**
* Number of block rows.
*/
- unsigned int rows;
+ size_type rows;
/**
* Number of block columns.
*/
- unsigned int columns;
+ size_type columns;
/**
* Array of sparsity patterns.
* individual blocks when doing a
* collective add or set.
*/
- std::vector<unsigned int> counter_within_block;
+ std::vector<size_type> counter_within_block;
/**
* Temporary vector for column
* local to global data on each
* sparse matrix.
*/
- std::vector<std::vector<unsigned int> > block_column_indices;
+ std::vector<std::vector<size_type> > block_column_indices;
/**
* Make the block sparse matrix a
* to call collect_sizes() after
* you assign them sizes.
*/
- BlockSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
+ BlockSparsityPattern (const size_type n_rows,
+ const size_type n_columns);
/**
* Forwarding to
* BlockSparsityPatternBase::reinit().
*/
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
+ void reinit (const size_type n_block_rows,
+ const size_type n_block_columns);
/**
* Initialize the pattern with
*/
void reinit (const BlockIndices &row_indices,
const BlockIndices &col_indices,
- const std::vector<std::vector<types::global_dof_index> > &row_lengths);
+ const std::vector<std::vector<size_type> > &row_lengths);
/**
* to call collect_sizes() after
* you assign them sizes.
*/
- BlockCompressedSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
+ BlockCompressedSparsityPattern (const size_type n_rows,
+ const size_type n_columns);
/**
* Initialize the pattern with
* and then entering the index
* values.
*/
- BlockCompressedSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
- const std::vector<types::global_dof_index> &col_block_sizes);
+ BlockCompressedSparsityPattern (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Initialize the pattern with
* <tt>row_block_sizes[i]</tt>
* times <tt>col_block_sizes[j]</tt>.
*/
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
+ void reinit (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Resize the matrix to a tensor
* to call collect_sizes() after
* you assign them sizes.
*/
- BlockCompressedSetSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
+ BlockCompressedSetSparsityPattern (const size_type n_rows,
+ const size_type n_columns);
/**
* Initialize the pattern with
* and then entering the index
* values.
*/
- BlockCompressedSetSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
- const std::vector<types::global_dof_index> &col_block_sizes);
+ BlockCompressedSetSparsityPattern (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Initialize the pattern with
* <tt>row_block_sizes[i]</tt>
* times <tt>col_block_sizes[j]</tt>.
*/
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
+ void reinit (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Resize the matrix to a tensor
* to call collect_sizes() after
* you assign them sizes.
*/
- BlockCompressedSimpleSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
+ BlockCompressedSimpleSparsityPattern (const size_type n_rows,
+ const size_type n_columns);
/**
* Initialize the pattern with
* and then entering the index
* values.
*/
- BlockCompressedSimpleSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
- const std::vector<types::global_dof_index> &col_block_sizes);
+ BlockCompressedSimpleSparsityPattern (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Initialize the pattern with symmetric
* <tt>row_block_sizes[i]</tt>
* times <tt>col_block_sizes[j]</tt>.
*/
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
+ void reinit (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Resize the pattern with symmetric
* Return the column number of
* the @p index th entry in row @p row.
*/
- unsigned int column_number (const unsigned int row,
- const unsigned int index) const;
+ size_type column_number (const size_type row,
+ const size_type index) const;
/**
* Allow the use of the reinit
* to call collect_sizes() after
* you assign them sizes.
*/
- BlockSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
+ BlockSparsityPattern (const size_type n_rows,
+ const size_type n_columns);
/**
* Initialize the pattern with
* and then entering the index
* values.
*/
- BlockSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
- const std::vector<types::global_dof_index> &col_block_sizes);
+ BlockSparsityPattern (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Initialize the pattern with an array
* <tt>row_block_sizes[i]</tt>
* times <tt>col_block_sizes[j]</tt>.
*/
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
+ void reinit (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes);
/**
* Resize the matrix to a square tensor
template <class SparsityPatternBase>
inline
SparsityPatternBase &
-BlockSparsityPatternBase<SparsityPatternBase>::block (const unsigned int row,
- const unsigned int column)
+BlockSparsityPatternBase<SparsityPatternBase>::block (const size_type row,
+ const size_type column)
{
Assert (row<rows, ExcIndexRange(row,0,rows));
Assert (column<columns, ExcIndexRange(column,0,columns));
template <class SparsityPatternBase>
inline
const SparsityPatternBase &
-BlockSparsityPatternBase<SparsityPatternBase>::block (const unsigned int row,
- const unsigned int column) const
+BlockSparsityPatternBase<SparsityPatternBase>::block (const size_type row,
+ const size_type column) const
{
Assert (row<rows, ExcIndexRange(row,0,rows));
Assert (column<columns, ExcIndexRange(column,0,columns));
template <class SparsityPatternBase>
inline
void
-BlockSparsityPatternBase<SparsityPatternBase>::add (const types::global_dof_index i,
- const types::global_dof_index j)
+BlockSparsityPatternBase<SparsityPatternBase>::add (const size_type i,
+ const size_type j)
{
// if you get an error here, are
// you sure you called
// <tt>collect_sizes()</tt> before?
- const std::pair<unsigned int,types::global_dof_index>
+ const std::pair<size_type,size_type>
row_index = row_indices.global_to_local (i),
col_index = column_indices.global_to_local (j);
sub_objects[row_index.first][col_index.first]->add (row_index.second,
template <class SparsityPatternBase>
template <typename ForwardIterator>
void
-BlockSparsityPatternBase<SparsityPatternBase>::add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted)
+BlockSparsityPatternBase<SparsityPatternBase>::add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted)
{
// Resize scratch arrays
if (block_column_indices.size() < this->n_block_cols())
counter_within_block.resize (this->n_block_cols());
}
- const types::global_dof_index n_cols = static_cast<types::global_dof_index>(end - begin);
+ const size_type n_cols = static_cast<size_type>(end - begin);
// Resize sub-arrays to n_cols. This
// is a bit wasteful, but we resize
// enough before actually going
// through all of them.
if (block_column_indices[0].size() < n_cols)
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ for (size_type i=0; i<this->n_block_cols(); ++i)
block_column_indices[i].resize(n_cols);
// Reset the number of added elements
// in each block to zero.
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ for (size_type i=0; i<this->n_block_cols(); ++i)
counter_within_block[i] = 0;
// Go through the column indices to
// comes from an element matrix).
for (ForwardIterator it = begin; it != end; ++it)
{
- const types::global_dof_index col = *it;
+ const size_type col = *it;
- const std::pair<unsigned int, types::global_dof_index>
+ const std::pair<size_type,size_type>
col_index = this->column_indices.global_to_local(col);
- const unsigned int local_index = counter_within_block[col_index.first]++;
+ const size_type local_index = counter_within_block[col_index.first]++;
block_column_indices[col_index.first][local_index] = col_index.second;
}
#ifdef DEBUG
// If in debug mode, do a check whether
// the right length has been obtained.
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ size_type length = 0;
+ for (size_type i=0; i<this->n_block_cols(); ++i)
length += counter_within_block[i];
Assert (length == n_cols, ExcInternalError());
#endif
// where we should start reading out
// data. Now let's write the data into
// the individual blocks!
- const std::pair<unsigned int,types::global_dof_index>
+ const std::pair<size_type,size_type>
row_index = this->row_indices.global_to_local (row);
- for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+ for (size_type block_col=0; block_col<n_block_cols(); ++block_col)
{
if (counter_within_block[block_col] == 0)
continue;
template <class SparsityPatternBase>
inline
bool
-BlockSparsityPatternBase<SparsityPatternBase>::exists (const types::global_dof_index i,
- const types::global_dof_index j) const
+BlockSparsityPatternBase<SparsityPatternBase>::exists (const size_type i,
+ const size_type j) const
{
// if you get an error here, are
// you sure you called
// <tt>collect_sizes()</tt> before?
- const std::pair<unsigned int,types::global_dof_index>
+ const std::pair<size_type,size_type>
row_index = row_indices.global_to_local (i),
col_index = column_indices.global_to_local (j);
return sub_objects[row_index.first][col_index.first]->exists (row_index.second,
template <class SparsityPatternBase>
inline
-unsigned int
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::
-row_length (const types::global_dof_index row) const
+row_length (const size_type row) const
{
- const std::pair<unsigned int,types::global_dof_index>
+ const std::pair<size_type,size_type>
row_index = row_indices.global_to_local (row);
- unsigned int c = 0;
+ size_type c = 0;
- for (unsigned int b=0; b<rows; ++b)
+ for (size_type b=0; b<columns; ++b) // b runs over the block columns of this row
c += sub_objects[row_index.first][b]->row_length (row_index.second);
return c;
template <class SparsityPatternBase>
inline
-unsigned int
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::n_block_cols () const
{
return columns;
template <class SparsityPatternBase>
inline
-unsigned int
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::n_block_rows () const
{
return rows;
inline
-unsigned int
-BlockCompressedSimpleSparsityPattern::column_number (const unsigned int row,
- const unsigned int index) const
+BlockCompressedSimpleSparsityPattern::size_type
+BlockCompressedSimpleSparsityPattern::column_number (const size_type row,
+ const size_type index) const
{
// .first= ith block, .second = jth row in that block
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
row_index = row_indices.global_to_local (row);
Assert(index<row_length(row), ExcIndexRange(index, 0, row_length(row)));
- unsigned int c = 0;
- for (unsigned int b=0; b<columns; ++b)
+ size_type c = 0;
+ for (size_type b=0; b<columns; ++b)
{
- unsigned int rowlen = sub_objects[row_index.first][b]->row_length (row_index.second);
+ size_type rowlen = sub_objects[row_index.first][b]->row_length (row_index.second);
if (index<c+rowlen)
return c+sub_objects[row_index.first][b]->column_number(row_index.second, index-c);
c += rowlen;
inline
void
BlockSparsityPattern::reinit (
- const unsigned int n_block_rows,
- const unsigned int n_block_columns)
+ const size_type n_block_rows,
+ const size_type n_block_columns)
{
BlockSparsityPatternBase<SparsityPattern>::reinit (
n_block_rows, n_block_columns);
class BlockVector : public BlockVectorBase<Vector<Number> >
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Typedef the base class for simpler
* access to its own typedefs.
* use blocks of different
* sizes.
*/
- explicit BlockVector (const unsigned int num_blocks = 0,
- const types::global_dof_index block_size = 0);
+ explicit BlockVector (const unsigned int num_blocks = 0,
+ const size_type block_size = 0);
/**
* Copy-Constructor. Dimension set to
* <tt>block_sizes[i]</tt> zero
* elements.
*/
- BlockVector (const std::vector<types::global_dof_index> &block_sizes);
+ BlockVector (const std::vector<size_type> &block_sizes);
/**
* Constructor. Initialize vector
* different blocks.
*/
template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
+ BlockVector (const std::vector<size_type> &n,
const InputIterator first,
const InputIterator end);
* is filled with zeros.
*/
void reinit (const unsigned int num_blocks,
- const types::global_dof_index block_size = 0,
+ const size_type block_size = 0,
const bool fast = false);
/**
* since they may be routed to
* the wrong block.
*/
- void reinit (const std::vector<types::global_dof_index> &N,
- const bool fast=false);
+ void reinit (const std::vector<size_type> &N,
+ const bool fast=false);
/**
* Reinitialize the BlockVector
template <typename Number>
template <typename InputIterator>
-BlockVector<Number>::BlockVector (const std::vector<unsigned int> &n,
+BlockVector<Number>::BlockVector (const std::vector<size_type> &n,
const InputIterator first,
const InputIterator end)
{
// copy elements soon
reinit (n, true);
InputIterator start = first;
- for (unsigned int b=0; b<n.size(); ++b)
+ for (size_type b=0; b<n.size(); ++b)
{
InputIterator end = start;
std::advance (end, static_cast<signed int>(n[b]));
inline
void BlockVector<Number>::compress (::dealii::VectorOperation::values operation)
{
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].compress(operation);
}
Assert (numbers::is_finite(factor), ExcNumberNotFinite());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] *= factor;
}
template <typename Number>
BlockVector<Number>::BlockVector (const unsigned int n_blocks,
- const types::global_dof_index block_size)
+ const size_type block_size)
{
reinit (n_blocks, block_size);
}
template <typename Number>
-BlockVector<Number>::BlockVector (const std::vector<types::global_dof_index> &n)
+BlockVector<Number>::BlockVector (const std::vector<size_type> &n)
{
reinit (n, false);
}
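// Example: constructing a BlockVector through the size_type interfaces above;
// `v` and `sizes` are hypothetical names.
//
//   std::vector<std::size_t> sizes (2);
//   sizes[0] = 3;  sizes[1] = 2;        // two blocks of lengths 3 and 2
//   BlockVector<double> v (sizes);      // v.size() == 5
//   v(4) = 1.;                          // global index 4 -> block 1, local index 1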
this->components.resize (v.n_blocks());
this->block_indices = v.block_indices;
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.components[i];
}
this->block_indices = v.get_block_indices();
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.block(i);
BaseClass::collect_sizes();
template <typename Number>
-void BlockVector<Number>::reinit (const unsigned int n_bl,
- const types::global_dof_index bl_sz,
- const bool fast)
+void BlockVector<Number>::reinit (const unsigned int n_bl,
+ const size_type bl_sz,
+ const bool fast)
{
- std::vector<types::global_dof_index> n(n_bl, bl_sz);
+ std::vector<size_type> n(n_bl, bl_sz);
reinit(n, fast);
}
template <typename Number>
-void BlockVector<Number>::reinit (const std::vector<types::global_dof_index> &n,
- const bool fast)
+void BlockVector<Number>::reinit (const std::vector<size_type> &n,
+ const bool fast)
{
this->block_indices.reinit (n);
if (this->components.size() != this->n_blocks())
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].reinit(n[i], fast);
}
if (this->components.size() != this->n_blocks())
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].reinit(n.block_size(i), fast);
}
if (this->components.size() != this->n_blocks())
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->block(i).reinit(v.block(i), fast);
}
Assert (this->n_blocks() == v.n_blocks(),
ExcDimensionMismatch(this->n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
dealii::swap (this->components[i], v.components[i]);
dealii::swap (this->block_indices, v.block_indices);
}
const bool scientific,
const bool across) const
{
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
{
if (across)
out << 'C' << i << ':';
template <typename Number>
void BlockVector<Number>::block_write (std::ostream &out) const
{
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].block_write(out);
}
template <typename Number>
void BlockVector<Number>::block_read (std::istream &in)
{
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].block_read(in);
}
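// Example: binary round trip through block_write()/block_read() above,
// assuming <sstream> and a vector w with the same block structure as v
// (hypothetical names):
//
//   std::stringstream buffer;
//   v.block_write (buffer);   // writes the blocks one after the other
//   w.block_read (buffer);    // restores them in the same order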
typedef Iterator<BlockVectorType,!constness> InverseConstnessIterator;
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Type of the number this
* iterator points
* const or non-const
* reference.
*/
- Iterator (BlockVector &parent,
- const types::global_dof_index global_index);
+ Iterator (BlockVector &parent,
+ const size_type global_index);
/**
* Copy constructor.
* the respective member
* variables.
*/
- Iterator (BlockVector &parent,
- const types::global_dof_index global_index,
- const unsigned int current_block,
- const types::global_dof_index index_within_block,
- const types::global_dof_index next_break_forward,
- const types::global_dof_index next_break_backward);
+ Iterator (BlockVector &parent,
+ const size_type global_index,
+ const size_type current_block,
+ const size_type index_within_block,
+ const size_type next_break_forward,
+ const size_type next_break_backward);
public:
* element to which we
* presently point.
*/
- types::global_dof_index global_index;
+ size_type global_index;
/**
* Current block and index
* element presently pointed
* to.
*/
- unsigned int current_block;
- types::global_dof_index index_within_block;
+ size_type current_block;
+ size_type index_within_block;
/**
* Indices of the global
* efficient than always
* asking the parent object.
*/
- types::global_dof_index next_break_forward;
- types::global_dof_index next_break_backward;
+ size_type next_break_forward;
+ size_type next_break_backward;
/**
* Move forward one element.
* Access to a single block.
*/
BlockType &
- block (const unsigned int i);
+ block (const size_type i);
/**
* Read-only access to a single block.
*/
const BlockType &
- block (const unsigned int i) const;
+ block (const size_type i) const;
/**
* Return a reference on the
* is the sum of the dimensions of all
* components.
*/
- types::global_dof_index size () const;
+ size_type size () const;
/**
* Return an iterator pointing to
/**
* Access components, returns U(i).
*/
- value_type operator() (const types::global_dof_index i) const;
+ value_type operator() (const size_type i) const;
/**
* Access components, returns U(i)
* as a writeable reference.
*/
- reference operator() (const types::global_dof_index i);
+ reference operator() (const size_type i);
/**
* Access components, returns U(i).
*
* Exactly the same as operator().
*/
- value_type operator[] (const types::global_dof_index i) const;
+ value_type operator[] (const size_type i) const;
/**
* Access components, returns U(i)
*
* Exactly the same as operator().
*/
- reference operator[] (const types::global_dof_index i);
+ reference operator[] (const size_type i);
/**
* Copy operator: fill all components of
* in the local range of this processor.
* Asks the corresponding block.
*/
- bool in_local_range (const types::global_dof_index global_index) const;
+ bool in_local_range (const size_type global_index) const;
/**
* Return whether the vector contains only
* indices.
*/
template <typename Number>
- void add (const std::vector<types::global_dof_index> &indices,
- const std::vector<Number> &values);
+ void add (const std::vector<size_type> &indices,
+ const std::vector<Number> &values);
/**
* This is a second collective
* values.
*/
template <typename Number>
- void add (const std::vector<types::global_dof_index> &indices,
- const Vector<Number> &values);
+ void add (const std::vector<size_type> &indices,
+ const Vector<Number> &values);
/**
* Take an address where
* functions above.
*/
template <typename Number>
- void add (const unsigned int n_elements,
- const types::global_dof_index *indices,
- const Number *values);
+ void add (const size_type n_elements,
+ const size_type *indices,
+ const Number *values);
/**
* $U(0-DIM)+=s$. Addition of <tt>s</tt>
inline
Iterator<BlockVectorType,constness>::
Iterator (BlockVector &parent,
- const types::global_dof_index global_index,
- const unsigned int current_block,
- const types::global_dof_index index_within_block,
- const types::global_dof_index next_break_forward,
- const types::global_dof_index next_break_backward)
+ const size_type global_index,
+ const size_type current_block,
+ const size_type index_within_block,
+ const size_type next_break_forward,
+ const size_type next_break_backward)
:
parent (&parent),
global_index (global_index),
template <class BlockVectorType, bool constness>
Iterator<BlockVectorType,constness>::
- Iterator (BlockVector &parent,
- const types::global_dof_index global_index)
+ Iterator (BlockVector &parent,
+ const size_type global_index)
:
parent (&parent),
global_index (global_index)
// past-the-end
if (global_index < parent.size())
{
- const std::pair<unsigned int, types::global_dof_index>
+ const std::pair<size_type, size_type>
indices = parent.block_indices.global_to_local(global_index);
current_block = indices.first;
index_within_block = indices.second;
template <class VectorType>
inline
-types::global_dof_index
+typename BlockVectorBase<VectorType>::size_type
BlockVectorBase<VectorType>::size () const
{
return block_indices.total_size();
template <class VectorType>
inline
-unsigned int
+typename BlockVectorBase<VectorType>::size_type
BlockVectorBase<VectorType>::n_blocks () const
{
return block_indices.size();
template <class VectorType>
inline
typename BlockVectorBase<VectorType>::BlockType &
-BlockVectorBase<VectorType>::block (const unsigned int i)
+BlockVectorBase<VectorType>::block (const size_type i)
{
Assert(i<n_blocks(), ExcIndexRange(i,0,n_blocks()));
template <class VectorType>
inline
const typename BlockVectorBase<VectorType>::BlockType &
-BlockVectorBase<VectorType>::block (const unsigned int i) const
+BlockVectorBase<VectorType>::block (const size_type i) const
{
Assert(i<n_blocks(), ExcIndexRange(i,0,n_blocks()));
void
BlockVectorBase<VectorType>::collect_sizes ()
{
- std::vector<types::global_dof_index> sizes (n_blocks());
+ std::vector<size_type> sizes (n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
sizes[i] = block(i).size();
block_indices.reinit(sizes);
void
BlockVectorBase<VectorType>::compress (::dealii::VectorOperation::values operation)
{
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
block(i).compress (operation);
}
inline
bool
BlockVectorBase<VectorType>::in_local_range
-(const types::global_dof_index global_index) const
+(const size_type global_index) const
{
- const std::pair<unsigned int,unsigned int> local_index
+ const std::pair<size_type,size_type> local_index
= block_indices.global_to_local (global_index);
return components[local_index.first].in_local_range (global_index);
bool
BlockVectorBase<VectorType>::all_zero () const
{
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
if (components[i].all_zero() == false)
return false;
bool
BlockVectorBase<VectorType>::is_non_negative () const
{
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
if (components[i].is_non_negative() == false)
return false;
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
value_type sum = 0.;
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
sum += components[i]*v.components[i];
return sum;
BlockVectorBase<VectorType>::norm_sqr () const
{
real_type sum = 0.;
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
sum += components[i].norm_sqr();
return sum;
BlockVectorBase<VectorType>::mean_value () const
{
value_type sum = 0.;
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
sum += components[i].mean_value() * components[i].size();
return sum/size();
BlockVectorBase<VectorType>::l1_norm () const
{
real_type sum = 0.;
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
sum += components[i].l1_norm();
return sum;
BlockVectorBase<VectorType>::linfty_norm () const
{
real_type sum = 0.;
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
value_type newval = components[i].linfty_norm();
if (sum<newval)
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i] -= v.components[i];
}
template <typename Number>
inline
void
-BlockVectorBase<VectorType>::add (const std::vector<types::global_dof_index> &indices,
- const std::vector<Number> &values)
+BlockVectorBase<VectorType>::add (const std::vector<size_type> &indices,
+ const std::vector<Number> &values)
{
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(), values.size()));
template <typename Number>
inline
void
-BlockVectorBase<VectorType>::add (const std::vector<types::global_dof_index> &indices,
- const Vector<Number> &values)
+BlockVectorBase<VectorType>::add (const std::vector<size_type> &indices,
+ const Vector<Number> &values)
{
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(), values.size()));
- const unsigned int n_indices = indices.size();
- for (unsigned int i=0; i<n_indices; ++i)
+ const size_type n_indices = indices.size();
+ for (size_type i=0; i<n_indices; ++i)
(*this)(indices[i]) += values(i);
}
template <typename Number>
inline
void
-BlockVectorBase<VectorType>::add (const unsigned int n_indices,
- const types::global_dof_index *indices,
- const Number *values)
+BlockVectorBase<VectorType>::add (const size_type n_indices,
+ const size_type *indices,
+ const Number *values)
{
- for (unsigned int i=0; i<n_indices; ++i)
+ for (size_type i=0; i<n_indices; ++i)
(*this)(indices[i]) += values[i];
}
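// Example: scattered addition through the pointer-based add() above; the
// indices are global and may cross block boundaries (hypothetical data):
//
//   const std::size_t idx[3] = {0, 3, 4};
//   const double      val[3] = {1., 2., 3.};
//   v.add (3, idx, val);      // v(0) += 1,  v(3) += 2,  v(4) += 3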
{
Assert (numbers::is_finite(a), ExcNumberNotFinite());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].add(a);
}
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].add(v.components[i]);
}
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].add(a, v.components[i]);
}
ExcDimensionMismatch(n_blocks(), w.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].add(a, v.components[i], b, w.components[i]);
}
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].sadd(x, v.components[i]);
}
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].sadd(x, a, v.components[i]);
}
Assert (n_blocks() == w.n_blocks(),
ExcDimensionMismatch(n_blocks(), w.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].sadd(x, a, v.components[i], b, w.components[i]);
}
Assert (n_blocks() == y.n_blocks(),
ExcDimensionMismatch(n_blocks(), y.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].sadd(x, a, v.components[i],
b, w.components[i], c, y.components[i]);
{
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].scale(v.block(i));
}
Assert (n_blocks() == w.n_blocks(),
ExcDimensionMismatch(n_blocks(), w.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
{
components[i].equ( a, v.components[i], b, w.components[i]);
}
BlockVectorBase<VectorType>::memory_consumption () const
{
std::size_t mem = sizeof(this->n_blocks());
- for (unsigned int i=0; i<this->components.size(); ++i)
+ for (size_type i=0; i<this->components.size(); ++i)
mem += MemoryConsumption::memory_consumption (this->components[i]);
mem += MemoryConsumption::memory_consumption (this->block_indices);
return mem;
Assert (n_blocks() == v.n_blocks(),
ExcDimensionMismatch(n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i].equ( a, v.components[i]);
}
template <class VectorType>
void BlockVectorBase<VectorType>::update_ghost_values () const
{
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
block(i).update_ghost_values ();
}
Assert (numbers::is_finite(s), ExcNumberNotFinite());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i] = s;
return *this;
{
AssertDimension(n_blocks(), v.n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i] = v.components[i];
return *this;
{
AssertDimension(n_blocks(), v.n_blocks());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i] = v.components[i];
return *this;
Assert (size() == v.size(),
ExcDimensionMismatch(size(), v.size()));
- unsigned int index_v = 0;
- for (unsigned int b=0; b<n_blocks(); ++b)
- for (unsigned int i=0; i<block(b).size(); ++i, ++index_v)
+ size_type index_v = 0;
+ for (size_type b=0; b<n_blocks(); ++b)
+ for (size_type i=0; i<block(b).size(); ++i, ++index_v)
block(b)(i) = v(index_v);
return *this;
{
Assert (block_indices == v.block_indices, ExcDifferentBlockIndices());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
if ( ! (components[i] == v.components[i]))
return false;
Assert (numbers::is_finite(factor), ExcNumberNotFinite());
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i] *= factor;
return *this;
Assert (numbers::is_finite(factor), ExcNumberNotFinite());
Assert (factor > 0., ExcDivideByZero() );
- for (unsigned int i=0; i<n_blocks(); ++i)
+ for (size_type i=0; i<n_blocks(); ++i)
components[i] /= factor;
return *this;
template <class VectorType>
inline
typename BlockVectorBase<VectorType>::value_type
-BlockVectorBase<VectorType>::operator() (const types::global_dof_index i) const
+BlockVectorBase<VectorType>::operator() (const size_type i) const
{
- const std::pair<unsigned int,types::global_dof_index> local_index
+ const std::pair<size_type,size_type> local_index
= block_indices.global_to_local (i);
return components[local_index.first](local_index.second);
}
template <class VectorType>
inline
typename BlockVectorBase<VectorType>::reference
-BlockVectorBase<VectorType>::operator() (const types::global_dof_index i)
+BlockVectorBase<VectorType>::operator() (const size_type i)
{
- const std::pair<unsigned int,types::global_dof_index> local_index
+ const std::pair<size_type,size_type> local_index
= block_indices.global_to_local (i);
return components[local_index.first](local_index.second);
}
template <class VectorType>
inline
typename BlockVectorBase<VectorType>::value_type
-BlockVectorBase<VectorType>::operator[] (const types::global_dof_index i) const
+BlockVectorBase<VectorType>::operator[] (const size_type i) const
{
return operator()(i);
}
template <class VectorType>
inline
typename BlockVectorBase<VectorType>::reference
-BlockVectorBase<VectorType>::operator[] (const types::global_dof_index i)
+BlockVectorBase<VectorType>::operator[] (const size_type i)
{
return operator()(i);
}
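
Both accessor pairs above funnel through BlockIndices::global_to_local, which
splits a global index into a (block, index-within-block) pair. A minimal
sketch of such a mapping via std::upper_bound over the cumulative block
offsets; this illustrates the idea only and is not deal.II's actual
implementation:

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    typedef std::size_t size_type;

    // start holds the cumulative offsets {0, n_0, n_0+n_1, ..., total}.
    // Returns (block number, index within that block) for global index i.
    std::pair<size_type, size_type>
    global_to_local (const std::vector<size_type> &start, const size_type i)
    {
      // first offset strictly greater than i, then step back one block
      const size_type block =
        std::upper_bound (start.begin(), start.end(), i) - start.begin() - 1;
      return std::make_pair (block, i - start[block]);
    }

    int main ()
    {
      std::vector<size_type> start (3);
      start[0] = 0;  start[1] = 3;  start[2] = 7;   // blocks of sizes 3 and 4
      const std::pair<size_type, size_type> p = global_to_local (start, 5);
      // p.first == 1 (second block), p.second == 2
      return (p.first == 1 && p.second == 2) ? 0 : 1;
    }
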
class ChunkSparseMatrix : public virtual Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Type of matrix entries. In analogy to
* the STL container classes.
* matrix is of dimension
* $m \times n$.
*/
- types::global_dof_index m () const;
+ size_type m () const;
/**
* Return the dimension of the
* matrix is of dimension
* $m \times n$.
*/
- types::global_dof_index n () const;
+ size_type n () const;
/**
* Return the number of nonzero
* the entries should happen to
* be zero, it is counted anyway.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Return the number of actually
* sparsity pattern but only the
* ones that are nonzero.
*/
- unsigned int n_actually_nonzero_elements () const;
+ size_type n_actually_nonzero_elements () const;
/**
* Return a (constant) reference
* is allowed to store zero
* values in non-existent fields.
*/
- void set (const types::global_dof_index i,
- const types::global_dof_index j,
+ void set (const size_type i,
+ const size_type j,
const number value);
/**
* is allowed to store zero
* values in non-existent fields.
*/
- void add (const types::global_dof_index i,
- const types::global_dof_index j,
+ void add (const size_type i,
+ const size_type j,
const number value);
/**
* into the matrix.
*/
template <typename number2>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
+ void add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number2 *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
/**
* Multiply the entire matrix by a
* tailored better to a sparse matrix
* structure.
*/
- number operator () (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ number operator () (const size_type i,
+ const size_type j) const;
/**
* This function is mostly like
* tailored better to a sparse matrix
* structure.
*/
- number el (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ number el (const size_type i,
+ const size_type j) const;
/**
* Return the main diagonal
* involve searching for the
* right column number.
*/
- number diag_element (const types::global_dof_index i) const;
+ number diag_element (const size_type i) const;
/**
* Same as above, but return a
* writable reference. Are you
* sure you know what you are doing?
*/
- number &diag_element (const types::global_dof_index i);
+ number &diag_element (const size_type i);
//@}
/**
*/
template <typename somenumber>
void PSOR (Vector<somenumber> &v,
- const std::vector<unsigned int> &permutation,
- const std::vector<unsigned int> &inverse_permutation,
+ const std::vector<size_type> &permutation,
+ const std::vector<size_type> &inverse_permutation,
const number om = 1.) const;
/**
*/
template <typename somenumber>
void TPSOR (Vector<somenumber> &v,
- const std::vector<unsigned int> &permutation,
- const std::vector<unsigned int> &inverse_permutation,
+ const std::vector<size_type> &permutation,
+ const std::vector<size_type> &inverse_permutation,
const number om = 1.) const;
/**
* object, using the reinit()
* function.
*/
- unsigned int max_len;
+ size_type max_len;
/**
* Return the location of entry
* $(i,j)$ within the val array.
*/
- unsigned int compute_location (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ size_type compute_location (const size_type i,
+ const size_type j) const;
// make all other sparse matrices
// friends
template <typename number>
inline
-types::global_dof_index ChunkSparseMatrix<number>::m () const
+typename ChunkSparseMatrix<number>::size_type
+ChunkSparseMatrix<number>::m () const
{
Assert (cols != 0, ExcNotInitialized());
return cols->rows;
template <typename number>
inline
-types::global_dof_index ChunkSparseMatrix<number>::n () const
+typename ChunkSparseMatrix<number>::size_type
+ChunkSparseMatrix<number>::n () const
{
Assert (cols != 0, ExcNotInitialized());
return cols->cols;
template <typename number>
inline
-unsigned int
-ChunkSparseMatrix<number>::compute_location (const types::global_dof_index i,
- const types::global_dof_index j) const
+typename ChunkSparseMatrix<number>::size_type
+ChunkSparseMatrix<number>::compute_location (const size_type i,
+ const size_type j) const
{
- const unsigned int chunk_size = cols->get_chunk_size();
- const unsigned int chunk_index
+ const size_type chunk_size = cols->get_chunk_size();
+ const size_type chunk_index
= cols->sparsity_pattern(i/chunk_size, j/chunk_size);
if (chunk_index == ChunkSparsityPattern::invalid_entry)
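
compute_location() looks up the chunk holding (i,j) in the reduced sparsity
pattern; the remaining offset arithmetic into the dense chunk is not visible
in this excerpt. A sketch of it, assuming the row-major chunk storage that
the diag_element() bodies further down also index into:

    #include <cstddef>

    typedef std::size_t size_type;

    // Each chunk occupies chunk_size*chunk_size consecutive slots of the val
    // array; within a chunk, entries are assumed to be stored row-major.
    size_type entry_offset (const size_type chunk_index,
                            const size_type chunk_size,
                            const size_type i,
                            const size_type j)
    {
      return chunk_index * chunk_size * chunk_size
             + (i % chunk_size) * chunk_size
             + (j % chunk_size);
    }

    int main ()
    {
      // entry (5,7) with chunk_size 4: if its chunk sits at chunk_index 9,
      // the offset is 9*16 + 1*4 + 3 == 151
      return (entry_offset (9, 4, 5, 7) == 151) ? 0 : 1;
    }
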
template <typename number>
inline
-void ChunkSparseMatrix<number>::set (const types::global_dof_index i,
- const types::global_dof_index j,
+void ChunkSparseMatrix<number>::set (const size_type i,
+ const size_type j,
const number value)
{
// the matrix that are not part of
// the sparsity pattern, if the
// value to which we set it is zero
- const unsigned int index = compute_location(i,j);
+ const size_type index = compute_location(i,j);
Assert ((index != SparsityPattern::invalid_entry) ||
(value == 0.),
ExcInvalidIndex(i,j));
template <typename number>
inline
-void ChunkSparseMatrix<number>::add (const types::global_dof_index i,
- const types::global_dof_index j,
+void ChunkSparseMatrix<number>::add (const size_type i,
+ const size_type j,
const number value)
{
if (value != 0.)
{
- const unsigned int index = compute_location(i,j);
+ const size_type index = compute_location(i,j);
Assert ((index != ChunkSparsityPattern::invalid_entry),
ExcInvalidIndex(i,j));
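
Both set() and add() assert that compute_location() did not return
invalid_entry before writing to val. The marker is safe because size_type is
unsigned, so static_cast<size_type>(-1) wraps to the largest representable
value and can never collide with a real array position. A minimal
illustration:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    typedef std::size_t size_type;

    static const size_type invalid_entry = static_cast<size_type> (-1);

    int main ()
    {
      // unsigned wrap-around: -1 becomes the maximum value of size_type
      assert (invalid_entry == std::numeric_limits<size_type>::max());
    }
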
template <typename number>
template <typename number2>
inline
-void ChunkSparseMatrix<number>::add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool /*elide_zero_values*/,
- const bool /*col_indices_are_sorted*/)
+void ChunkSparseMatrix<number>::add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const number2 *values,
+ const bool /*elide_zero_values*/,
+ const bool /*col_indices_are_sorted*/)
{
// TODO: could be done more efficiently...
- for (unsigned int col=0; col<n_cols; ++col)
+ for (size_type col=0; col<n_cols; ++col)
add(row, col_indices[col], static_cast<number>(values[col]));
}
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
// multiply all elements of the matrix with
// the given factor. this includes the
const number factor_inv = 1. / factor;
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
// multiply all elements of the matrix with
// the given factor. this includes the
template <typename number>
inline
-number ChunkSparseMatrix<number>::operator () (const types::global_dof_index i,
- const types::global_dof_index j) const
+number ChunkSparseMatrix<number>::operator () (const size_type i,
+ const size_type j) const
{
Assert (cols != 0, ExcNotInitialized());
AssertThrow (compute_location(i,j) != SparsityPattern::invalid_entry,
template <typename number>
inline
-number ChunkSparseMatrix<number>::el (const types::global_dof_index i,
- const types::global_dof_index j) const
+number ChunkSparseMatrix<number>::el (const size_type i,
+ const size_type j) const
{
Assert (cols != 0, ExcNotInitialized());
- const unsigned int index = compute_location(i,j);
+ const size_type index = compute_location(i,j);
if (index != ChunkSparsityPattern::invalid_entry)
return val[index];
template <typename number>
inline
-number ChunkSparseMatrix<number>::diag_element (const types::global_dof_index i) const
+number ChunkSparseMatrix<number>::diag_element (const size_type i) const
{
Assert (cols != 0, ExcNotInitialized());
Assert (m() == n(), ExcNotQuadratic());
// Use that the first element in each row
// of a quadratic matrix is the main
// diagonal of the chunk sparsity pattern
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
return val[cols->sparsity_pattern.rowstart[i/chunk_size]
*
chunk_size * chunk_size
template <typename number>
inline
-number &ChunkSparseMatrix<number>::diag_element (const types::global_dof_index i)
+number &ChunkSparseMatrix<number>::diag_element (const size_type i)
{
Assert (cols != 0, ExcNotInitialized());
Assert (m() == n(), ExcNotQuadratic());
// Use that the first element in each row
// of a quadratic matrix is the main
// diagonal of the chunk sparsity pattern
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
return val[cols->sparsity_pattern.rowstart[i/chunk_size]
*
chunk_size * chunk_size
ChunkSparseMatrix<number>::copy_from (const ForwardIterator begin,
const ForwardIterator end)
{
- Assert (static_cast<unsigned int>(std::distance (begin, end)) == m(),
+  Assert (static_cast<size_type>(std::distance (begin, end)) == m(),
ExcIteratorRange (std::distance (begin, end), m()));
// for use in the inner loop, we
// define a typedef to the type of
// the inner iterators
typedef typename std::iterator_traits<ForwardIterator>::value_type::const_iterator inner_iterator;
- unsigned int row=0;
+ size_type row=0;
for (ForwardIterator i=begin; i!=end; ++i, ++row)
{
const inner_iterator end_of_row = i->end();
typename DstIterator>
inline
void
- chunk_vmult_add (const unsigned int chunk_size,
+ chunk_vmult_add (const size_type chunk_size,
const MatrixIterator matrix,
const SrcIterator src,
DstIterator dst)
{
MatrixIterator matrix_row = matrix;
- for (unsigned int i=0; i<chunk_size;
+ for (size_type i=0; i<chunk_size;
++i, matrix_row += chunk_size)
{
typename std::iterator_traits<DstIterator>::value_type
sum = 0;
- for (unsigned int j=0; j<chunk_size; ++j)
+ for (size_type j=0; j<chunk_size; ++j)
sum += matrix_row[j] * src[j];
dst[i] += sum;
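
chunk_vmult_add() is a dense matrix-vector kernel confined to one chunk:
dst += M*src for a row-major chunk_size x chunk_size block. The standalone
sketch below reproduces the same loop structure on raw arrays with made-up
data:

    #include <cstddef>

    typedef std::size_t size_type;

    // dst += M * src for a dense, row-major cs x cs chunk
    void dense_chunk_vmult_add (const size_type cs,
                                const double *M,
                                const double *src,
                                double       *dst)
    {
      for (size_type i = 0; i < cs; ++i)
        {
          double sum = 0;
          for (size_type j = 0; j < cs; ++j)
            sum += M[i*cs + j] * src[j];
          dst[i] += sum;
        }
    }

    int main ()
    {
      const double M[4]   = {1, 2, 3, 4};   // one 2x2 chunk, row-major
      const double src[2] = {1, 1};
      double       dst[2] = {0, 0};
      dense_chunk_vmult_add (2, M, src, dst);   // dst == {3, 7}
      return (dst[0] == 3 && dst[1] == 7) ? 0 : 1;
    }
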
typename DstIterator>
inline
void
- chunk_vmult_subtract (const unsigned int chunk_size,
+ chunk_vmult_subtract (const size_type chunk_size,
const MatrixIterator matrix,
const SrcIterator src,
DstIterator dst)
{
MatrixIterator matrix_row = matrix;
- for (unsigned int i=0; i<chunk_size;
+ for (size_type i=0; i<chunk_size;
++i, matrix_row += chunk_size)
{
typename std::iterator_traits<DstIterator>::value_type
sum = 0;
- for (unsigned int j=0; j<chunk_size; ++j)
+ for (size_type j=0; j<chunk_size; ++j)
sum += matrix_row[j] * src[j];
dst[i] -= sum;
typename DstIterator>
inline
void
- chunk_Tvmult_add (const unsigned int chunk_size,
+ chunk_Tvmult_add (const size_type chunk_size,
const MatrixIterator matrix,
const SrcIterator src,
DstIterator dst)
{
- for (unsigned int i=0; i<chunk_size; ++i)
+ for (size_type i=0; i<chunk_size; ++i)
{
typename std::iterator_traits<DstIterator>::value_type
sum = 0;
- for (unsigned int j=0; j<chunk_size; ++j)
+ for (size_type j=0; j<chunk_size; ++j)
sum += matrix[j*chunk_size+i] * src[j];
dst[i] += sum;
typename SrcIterator2>
inline
result_type
- chunk_matrix_scalar_product (const unsigned int chunk_size,
+ chunk_matrix_scalar_product (const size_type chunk_size,
const MatrixIterator matrix,
const SrcIterator1 u,
const SrcIterator2 v)
MatrixIterator matrix_row = matrix;
- for (unsigned int i=0; i<chunk_size;
+ for (size_type i=0; i<chunk_size;
++i, matrix_row += chunk_size)
{
typename std::iterator_traits<SrcIterator2>::value_type
sum = 0;
- for (unsigned int j=0; j<chunk_size; ++j)
+ for (size_type j=0; j<chunk_size; ++j)
sum += matrix_row[j] * v[j];
result += u[i] * sum;
Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n()));
reinit (c);
- for (unsigned int i=0; i<n(); ++i)
+ for (size_type i=0; i<n(); ++i)
this->set(i,i,1.);
}
if (val)
{
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
std::fill_n (val,
cols->sparsity_pattern.n_nonzero_elements() *
chunk_size * chunk_size,
ExcDimensionMismatch (cols->n_cols(), id.n()));
*this = 0;
- for (unsigned int i=0; i<n(); ++i)
+ for (size_type i=0; i<n(); ++i)
this->set(i,i,1.);
return *this;
// enough so that we can store full
// chunks. this entails some padding
// elements
- const unsigned int chunk_size = cols->get_chunk_size();
- const unsigned int N = cols->sparsity_pattern.n_nonzero_elements() *
+ const size_type chunk_size = cols->get_chunk_size();
+ const size_type N = cols->sparsity_pattern.n_nonzero_elements() *
chunk_size * chunk_size;
if (N > max_len || max_len == 0)
{
template <typename number>
-unsigned int
+typename ChunkSparseMatrix<number>::size_type
ChunkSparseMatrix<number>::n_nonzero_elements () const
{
Assert (cols != 0, ExcNotInitialized());
template <typename number>
-unsigned int
+typename ChunkSparseMatrix<number>::size_type
ChunkSparseMatrix<number>::n_actually_nonzero_elements () const
{
Assert (cols != 0, ExcNotInitialized());
// the matrix. since we have the invariant
// that padding elements are zero, nothing
// bad can happen here
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
return std::count_if(&val[0],
&val[cols->sparsity_pattern.n_nonzero_elements () *
chunk_size * chunk_size],
// copy everything, including padding
// elements
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
std::copy (&matrix.val[0],
&matrix.val[cols->sparsity_pattern.n_nonzero_elements()
* chunk_size * chunk_size],
*this = 0;
// then copy old matrix
- for (unsigned int row=0; row<matrix.m(); ++row)
- for (unsigned int col=0; col<matrix.n(); ++col)
+ for (size_type row=0; row<matrix.m(); ++row)
+ for (size_type col=0; col<matrix.n(); ++col)
if (matrix(row,col) != 0)
set (row, col, matrix(row,col));
}
// add everything, including padding
// elements
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
number *val_ptr = &val[0];
const somenumber *matrix_ptr = &matrix.val[0];
const number *const end_ptr = &val[cols->sparsity_pattern.n_nonzero_elements()
Assert (!PointerComparison::equal(&src, &dst), ExcSourceEqualsDestination());
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all chunks. note that we need
// to treat the last chunk row and column
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
- const unsigned int n_regular_chunk_rows
+ const size_type n_regular_chunk_rows
= (rows_have_padding ?
n_chunk_rows-1 :
n_chunk_rows);
- const number *val_ptr = val;
- const unsigned int *colnum_ptr = cols->sparsity_pattern.colnums;
+ const number *val_ptr = val;
+ const size_type *colnum_ptr = cols->sparsity_pattern.colnums;
typename OutVector::iterator dst_ptr = dst.begin();
- for (unsigned int chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
+ for (size_type chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
{
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
else
// we're at a chunk column that
// has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
+= (val_ptr[r*cols->chunk_size + c] *
src(*colnum_ptr * cols->chunk_size + c));
// necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
{
// we're at a chunk row but not
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
+= (val_ptr[r*cols->chunk_size + c] *
src(*colnum_ptr * cols->chunk_size + c));
else
// we're at a chunk row and
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
+= (val_ptr[r*cols->chunk_size + c] *
src(*colnum_ptr * cols->chunk_size + c));
Assert (!PointerComparison::equal(&src, &dst), ExcSourceEqualsDestination());
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all chunks. note that we need
// to treat the last chunk row and column
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
- const unsigned int n_regular_chunk_rows
+ const size_type n_regular_chunk_rows
= (rows_have_padding ?
n_chunk_rows-1 :
n_chunk_rows);
// like in vmult_add, but don't keep an
// iterator into dst around since we're not
// traversing it sequentially this time
- const number *val_ptr = val;
- const unsigned int *colnum_ptr = cols->sparsity_pattern.colnums;
+ const number *val_ptr = val;
+ const size_type *colnum_ptr = cols->sparsity_pattern.colnums;
- for (unsigned int chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
+ for (size_type chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
{
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
else
// we're at a chunk column that
// has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(*colnum_ptr * cols->chunk_size + c)
+= (val_ptr[r*cols->chunk_size + c] *
src(chunk_row * cols->chunk_size + r));
// necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
{
// we're at a chunk row but not
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
dst(*colnum_ptr * cols->chunk_size + c)
+= (val_ptr[r*cols->chunk_size + c] *
src(chunk_row * cols->chunk_size + r));
else
// we're at a chunk row and
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(*colnum_ptr * cols->chunk_size + c)
+= (val_ptr[r*cols->chunk_size + c] *
src(chunk_row * cols->chunk_size + r));
// like matrix_scalar_product, except that
// the two vectors are now the same
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all chunks. note that we need
// to treat the last chunk row and column
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
- const unsigned int n_regular_chunk_rows
+ const size_type n_regular_chunk_rows
= (rows_have_padding ?
n_chunk_rows-1 :
n_chunk_rows);
- const number *val_ptr = val;
- const unsigned int *colnum_ptr = cols->sparsity_pattern.colnums;
+ const number *val_ptr = val;
+ const size_type *colnum_ptr = cols->sparsity_pattern.colnums;
typename Vector<somenumber>::const_iterator v_ptr = v.begin();
- for (unsigned int chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
+ for (size_type chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
{
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
else
// we're at a chunk column that
// has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
v(chunk_row * cols->chunk_size + r)
// necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
{
// we're at a chunk row but not
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
result
+=
v(chunk_row * cols->chunk_size + r)
else
// we're at a chunk row and
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
v(chunk_row * cols->chunk_size + r)
// function
somenumber result = 0;
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all chunks. note that we need
// to treat the last chunk row and column
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
- const unsigned int n_regular_chunk_rows
+ const size_type n_regular_chunk_rows
= (rows_have_padding ?
n_chunk_rows-1 :
n_chunk_rows);
- const number *val_ptr = val;
- const unsigned int *colnum_ptr = cols->sparsity_pattern.colnums;
+ const number *val_ptr = val;
+ const size_type *colnum_ptr = cols->sparsity_pattern.colnums;
typename Vector<somenumber>::const_iterator u_ptr = u.begin();
- for (unsigned int chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
+ for (size_type chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
{
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
else
// we're at a chunk column that
// has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
u(chunk_row * cols->chunk_size + r)
// necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
{
// we're at a chunk row but not
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
result
+=
u(chunk_row * cols->chunk_size + r)
else
// we're at a chunk row and
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
result
+=
u(chunk_row * cols->chunk_size + r)
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all rows and columns; it is
// safe to also loop over the padding
Vector<real_type> column_sums(cols->sparsity_pattern.n_cols() *
cols->chunk_size);
- for (unsigned int chunk_row=0; chunk_row<n_chunk_rows; ++chunk_row)
- for (unsigned int j=cols->sparsity_pattern.rowstart[chunk_row];
+ for (size_type chunk_row=0; chunk_row<n_chunk_rows; ++chunk_row)
+ for (size_type j=cols->sparsity_pattern.rowstart[chunk_row];
j<cols->sparsity_pattern.rowstart[chunk_row+1] ; ++j)
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int s=0; s<cols->chunk_size; ++s)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type s=0; s<cols->chunk_size; ++s)
column_sums(cols->sparsity_pattern.colnums[j] *
cols->chunk_size + s) +=
numbers::NumberTraits<number>::abs(val[j * cols->chunk_size *
// done in the SparseMatrix class but since
// it is rarely called in time critical
// places it is probably not worth it
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all rows and columns; it is
// safe to also loop over the padding
Vector<real_type> row_sums(cols->sparsity_pattern.n_rows() *
cols->chunk_size);
- for (unsigned int chunk_row=0; chunk_row<n_chunk_rows; ++chunk_row)
- for (unsigned int j=cols->sparsity_pattern.rowstart[chunk_row];
+ for (size_type chunk_row=0; chunk_row<n_chunk_rows; ++chunk_row)
+ for (size_type j=cols->sparsity_pattern.rowstart[chunk_row];
j<cols->sparsity_pattern.rowstart[chunk_row+1] ; ++j)
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int s=0; s<cols->chunk_size; ++s)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type s=0; s<cols->chunk_size; ++s)
row_sums(chunk_row * cols->chunk_size + r) +=
numbers::NumberTraits<number>::abs(val[j * cols->chunk_size *
cols->chunk_size +
// vmult_add, except that we subtract
// rather than add A*u
/////////
- const unsigned int n_chunk_rows = cols->sparsity_pattern.n_rows();
+ const size_type n_chunk_rows = cols->sparsity_pattern.n_rows();
// loop over all chunks. note that we need
// to treat the last chunk row and column
const bool rows_have_padding = (m() % cols->chunk_size != 0),
cols_have_padding = (n() % cols->chunk_size != 0);
- const unsigned int n_regular_chunk_rows
+ const size_type n_regular_chunk_rows
= (rows_have_padding ?
n_chunk_rows-1 :
n_chunk_rows);
const number *val_ptr = val;
- const unsigned int *colnum_ptr = cols->sparsity_pattern.colnums;
+ const size_type *colnum_ptr = cols->sparsity_pattern.colnums;
typename Vector<somenumber>::iterator dst_ptr = dst.begin();
- for (unsigned int chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
+ for (size_type chunk_row=0; chunk_row<n_regular_chunk_rows; ++chunk_row)
{
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
else
// we're at a chunk column that
// has padding
- for (unsigned int r=0; r<cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
-= (val_ptr[r*cols->chunk_size + c] *
u(*colnum_ptr * cols->chunk_size + c));
// necessary
if (rows_have_padding)
{
- const unsigned int chunk_row = n_chunk_rows - 1;
+ const size_type chunk_row = n_chunk_rows - 1;
const number *const val_end_of_row = &val[cols->sparsity_pattern.rowstart[chunk_row+1]
* cols->chunk_size
{
// we're at a chunk row but not
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
-= (val_ptr[r*cols->chunk_size + c] *
u(*colnum_ptr * cols->chunk_size + c));
else
// we're at a chunk row and
// column that has padding
- for (unsigned int r=0; r<m() % cols->chunk_size; ++r)
- for (unsigned int c=0; c<n() % cols->chunk_size; ++c)
+ for (size_type r=0; r<m() % cols->chunk_size; ++r)
+ for (size_type c=0; c<n() % cols->chunk_size; ++c)
dst(chunk_row * cols->chunk_size + r)
-= (val_ptr[r*cols->chunk_size + c] *
u(*colnum_ptr * cols->chunk_size + c));
template <typename somenumber>
void
ChunkSparseMatrix<number>::PSOR (Vector<somenumber> &dst,
- const std::vector<unsigned int> &permutation,
- const std::vector<unsigned int> &inverse_permutation,
+ const std::vector<size_type> &permutation,
+ const std::vector<size_type> &inverse_permutation,
const number /*om*/) const
{
Assert (cols != 0, ExcNotInitialized());
template <typename somenumber>
void
ChunkSparseMatrix<number>::TPSOR (Vector<somenumber> &dst,
- const std::vector<unsigned int> &permutation,
- const std::vector<unsigned int> &inverse_permutation,
+ const std::vector<size_type> &permutation,
+ const std::vector<size_type> &inverse_permutation,
const number /*om*/) const
{
Assert (cols != 0, ExcNotInitialized());
width = precision+2;
}
- for (unsigned int i=0; i<m(); ++i)
+ for (size_type i=0; i<m(); ++i)
{
- for (unsigned int j=0; j<n(); ++j)
+ for (size_type j=0; j<n(); ++j)
if (cols->sparsity_pattern(i,j) != SparsityPattern::invalid_entry)
out << std::setw(width)
<< val[cols->sparsity_pattern(i,j)] * denominator << ' ';
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
- const unsigned int chunk_size = cols->get_chunk_size();
+ const size_type chunk_size = cols->get_chunk_size();
// loop over all chunk rows and columns,
// and each time we find something repeat
// it chunk_size times in both directions
- for (unsigned int i=0; i<cols->sparsity_pattern.n_rows(); ++i)
+ for (size_type i=0; i<cols->sparsity_pattern.n_rows(); ++i)
{
- for (unsigned int d=0; d<chunk_size; ++d)
- for (unsigned int j=0; j<cols->sparsity_pattern.n_cols(); ++j)
+ for (size_type d=0; d<chunk_size; ++d)
+ for (size_type j=0; j<cols->sparsity_pattern.n_cols(); ++j)
if (cols->sparsity_pattern(i,j) == SparsityPattern::invalid_entry)
{
- for (unsigned int e=0; e<chunk_size; ++e)
+ for (size_type e=0; e<chunk_size; ++e)
out << '.';
}
else if (std::fabs(val[cols->sparsity_pattern(i,j)]) > threshold)
{
- for (unsigned int e=0; e<chunk_size; ++e)
+ for (size_type e=0; e<chunk_size; ++e)
out << '*';
}
else
{
- for (unsigned int e=0; e<chunk_size; ++e)
+ for (size_type e=0; e<chunk_size; ++e)
out << ':';
}
out << std::endl;
class ChunkSparsityPattern : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
/**
* Define a value which is used
* but the actual value of the
* variable may change over time.
*/
- static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
+ static const size_type invalid_entry = SparsityPattern::invalid_entry;
/**
* Initialize the matrix empty,
* @arg max_per_row maximum
* number of nonzero entries per row
*/
- ChunkSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const unsigned int max_chunks_per_row,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const size_type max_chunks_per_row,
+ const size_type chunk_size);
/**
* @deprecated This constructor is deprecated. Use the version
* without the last argument
*/
- ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int max_chunks_per_row,
- const unsigned int chunk_size,
+ ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const size_type max_chunks_per_row,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* each row. This vector must
* have one entry for each row.
*/
- ChunkSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This constructor is deprecated. Use the version
* without the last argument
*/
- ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
+ ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* taking row and column numbers
* separately.
*/
- ChunkSparsityPattern (const types::global_dof_index n,
- const unsigned int max_per_row,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size);
/**
* Initialize a quadratic matrix.
* each row. This vector must
* have one entry for each row.
*/
- ChunkSparsityPattern (const types::global_dof_index m,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size);
+ ChunkSparsityPattern (const size_type m,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This constructor is deprecated. Use the version
* without the last argument
*/
- ChunkSparsityPattern (const unsigned int m,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
+ ChunkSparsityPattern (const size_type m,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* operations to the other
* <tt>reinit</tt> function.
*/
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const unsigned int max_per_row,
- const unsigned int chunk_size);
+ void reinit (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
* without the last argument
*/
- void reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size,
+ void reinit (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* optimized access in relaxation
* methods of SparseMatrix.
*/
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size);
+ void reinit (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
* without the last argument
*/
- void reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
- const bool optimize_diagonal) DEAL_II_DEPRECATED;
+ void reinit (const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
+ const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* Same as above, but with a
* VectorSlice argument instead.
*/
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const unsigned int chunk_size);
+ void reinit (const size_type m,
+ const size_type n,
+ const VectorSlice<const std::vector<size_type> > &row_lengths,
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
* without the last argument
*/
- void reinit (const unsigned int m,
- const unsigned int n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const unsigned int chunk_size,
+ void reinit (const size_type m,
+ const size_type n,
+ const VectorSlice<const std::vector<size_type> > &row_lengths,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* contents of one
* line. Dereferencing these
* inner iterators must either
- * yield a pair of an unsigned
- * integer as column index and a
+ * yield a pair of a size_type
+ * as column index and a
* value of arbitrary type (such
* a type would be used if we
* wanted to describe a sparse
* matrix with one such object),
- * or simply an unsigned integer
+ * or simply a size_type
* (of we only wanted to describe
* a sparsity pattern). The
* function is able to determine
* may be used to fill a sparsity
* pattern:
* @code
- * std::vector<std::vector<unsigned int> > column_indices (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
+ * std::vector<std::vector<size_type> > column_indices (n_rows);
+ * for (size_type row=0; row<n_rows; ++row)
* // generate necessary columns in this row
* fill_row (column_indices[row]);
*
* <tt>end</tt> (namely
* <tt>std::vector</tt>s), and the
* inner iterators dereferenced
- * yield unsigned integers as
+ * yield size_type values as
* column indices. Note that we
* could have replaced each of
* the two <tt>std::vector</tt>
* whole matrix, not only a
* sparsity pattern:
* @code
- * std::vector<std::map<unsigned int,double> > entries (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
+ * std::vector<std::map<size_type,double> > entries (n_rows);
+ * for (size_type row=0; row<n_rows; ++row)
* // generate necessary pairs of columns
* // and corresponding values in this row
* fill_row (entries[row]);
* This example works because
* dereferencing iterators of the
* inner type yields a pair of
- * unsigned integers and a value,
+ * a size_type and a value,
* the first of which we take as
* column index. As previously,
* the outer <tt>std::vector</tt>
* could be replaced by
* <tt>std::list</tt>, and the inner
- * <tt>std::map<unsigned int,double></tt>
+ * <tt>std::map<size_type,double></tt>
* could be replaced by
- * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
+ * <tt>std::vector<std::pair<size_type,double> ></tt>,
* or a list or set of such
* pairs, as they all return
* iterators that point to such
* pairs.
*/
template <typename ForwardIterator>
- void copy_from (const types::global_dof_index n_rows,
- const types::global_dof_index n_cols,
+ void copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size);
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
* without the last argument
*/
template <typename ForwardIterator>
- void copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
+ void copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
*/
template <typename SparsityType>
void copy_from (const SparsityType &csp,
- const unsigned int chunk_size);
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
*/
template <typename SparsityType>
void copy_from (const SparsityType &csp,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
*/
template <typename number>
void copy_from (const FullMatrix<number> &matrix,
- const unsigned int chunk_size);
+ const size_type chunk_size);
/**
* @deprecated This function is deprecated. Use the function
*/
template <typename number>
void copy_from (const FullMatrix<number> &matrix,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool optimize_diagonal) DEAL_II_DEPRECATED;
/**
* argument when constructing this
* object.
*/
- unsigned int get_chunk_size () const;
+ size_type get_chunk_size () const;
/**
* Return the maximum number of entries per
* number of entries actually allocated by
* the user.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
* Add a nonzero entry to the matrix.
* If the entry already exists, nothing
* bad happens.
*/
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
+ void add (const size_type i,
+ const size_type j);
/**
* Make the sparsity pattern
* matrix, which equals the dimension
* of the image space.
*/
- inline types::global_dof_index n_rows () const;
+ inline size_type n_rows () const;
/**
* Return number of columns of this
* matrix, which equals the dimension
* of the range space.
*/
- inline types::global_dof_index n_cols () const;
+ inline size_type n_cols () const;
/**
* Check if a value at a certain
* position may be non-zero.
*/
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ bool exists (const size_type i,
+ const size_type j) const;
/**
* Number of entries in a specific row.
*/
- unsigned int row_length (const types::global_dof_index row) const;
+ size_type row_length (const size_type row) const;
/**
* Compute the bandwidth of the matrix
* bandwidth a $n\times m$ matrix can
* have is $\max\{n-1,m-1\}$.
*/
- unsigned int bandwidth () const;
+ size_type bandwidth () const;
/**
* Return the number of nonzero elements of
* matrix struct is compressed. It does not
* make too much sense otherwise anyway.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Return whether the structure is
* Number of rows that this sparsity
* structure shall represent.
*/
- types::global_dof_index rows;
+ size_type rows;
/**
* Number of columns that this sparsity
* structure shall represent.
*/
- types::global_dof_index cols;
+ size_type cols;
/**
* The size of chunks.
*/
- unsigned int chunk_size;
+ size_type chunk_size;
/**
* The reduced sparsity pattern. We store
inline
-types::global_dof_index
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::n_rows () const
{
return rows;
inline
-types::global_dof_index
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::n_cols () const
{
return cols;
inline
-unsigned int
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::get_chunk_size () const
{
return chunk_size;
template <typename ForwardIterator>
void
-ChunkSparsityPattern::copy_from (const types::global_dof_index n_rows,
- const types::global_dof_index n_cols,
+ChunkSparsityPattern::copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool)
{
copy_from (n_rows, n_cols, begin, end, chunk_size);
template <typename ForwardIterator>
void
-ChunkSparsityPattern::copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
+ChunkSparsityPattern::copy_from (const size_type n_rows,
+ const size_type n_cols,
const ForwardIterator begin,
const ForwardIterator end,
- const unsigned int chunk_size)
+ const size_type chunk_size)
{
- Assert (static_cast<unsigned int>(std::distance (begin, end)) == n_rows,
+ Assert (static_cast<size_type>(std::distance (begin, end)) == n_rows,
ExcIteratorRange (std::distance (begin, end), n_rows));
// first determine row lengths for
// diagonal entry is in a certain
// row or not
const bool is_square = (n_rows == n_cols);
- std::vector<unsigned int> row_lengths;
+ std::vector<size_type> row_lengths;
row_lengths.reserve(n_rows);
for (ForwardIterator i=begin; i!=end; ++i)
row_lengths.push_back (std::distance (i->begin(), i->end())
// now enter all the elements into
// the matrix
- unsigned int row = 0;
+ size_type row = 0;
typedef typename std::iterator_traits<ForwardIterator>::value_type::const_iterator inner_iterator;
for (ForwardIterator i=begin; i!=end; ++i, ++row)
{
const inner_iterator end_of_row = i->end();
for (inner_iterator j=i->begin(); j!=end_of_row; ++j)
{
- const unsigned int col
+ const size_type col
= internal::SparsityPatternTools::get_column_index_from_iterator(*j);
Assert (col < n_cols, ExcInvalidIndex(col,n_cols));
class CompressedSetSparsityPattern : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* An iterator that can be used to
* iterate over the elements of a single
* row. The result of dereferencing such
* an iterator is a column index.
*/
- typedef std::set<unsigned int>::const_iterator row_iterator;
+ typedef std::set<size_type>::const_iterator row_iterator;
/**
* matrix with @p m rows and
* @p n columns.
*/
- CompressedSetSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n);
+ CompressedSetSparsityPattern (const size_type m,
+ const size_type n);
/**
* Initialize a square matrix of
* dimension @p n.
*/
- CompressedSetSparsityPattern (const types::global_dof_index n);
+ CompressedSetSparsityPattern (const size_type n);
/**
* Copy operator. For this the
* max_entries_per_row() nonzero
* entries per row.
*/
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n);
+ void reinit (const size_type m,
+ const size_type n);
/**
* Since this object is kept
* this number may change as
* entries are added.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
* Add a nonzero entry to the
* matrix. If the entry already
* exists, nothing bad happens.
*/
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
+ void add (const size_type i,
+ const size_type j);
/**
* Add several nonzero entries to the
* happens.
*/
template <typename ForwardIterator>
- void add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
+ void add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
/**
* Check if a value at a certain
* position may be non-zero.
*/
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ bool exists (const size_type i,
+ const size_type j) const;
/**
* Make the sparsity pattern
* matrix, which equals the dimension
* of the image space.
*/
- types::global_dof_index n_rows () const;
+ size_type n_rows () const;
/**
* Return number of columns of this
* matrix, which equals the dimension
* of the range space.
*/
- types::global_dof_index n_cols () const;
+ size_type n_cols () const;
/**
* Number of entries in a specific row.
*/
- unsigned int row_length (const types::global_dof_index row) const;
+ size_type row_length (const size_type row) const;
/**
* Return an iterator that can loop over
* row. Dereferencing the iterator yields
* a column index.
*/
- row_iterator row_begin (const types::global_dof_index row) const;
+ row_iterator row_begin (const size_type row) const;
/**
* End iterator for the given row.
*/
- row_iterator row_end (const types::global_dof_index row) const;
+ row_iterator row_end (const size_type row) const;
/**
* $(i,j)$ represents a nonzero entry
* of the matrix.
*/
- unsigned int bandwidth () const;
+ size_type bandwidth () const;
/**
* Return the number of nonzero elements
* allocated through this sparsity
* pattern.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Return whether this object stores only
* Number of rows that this sparsity
* structure shall represent.
*/
- types::global_dof_index rows;
+ size_type rows;
/**
* Number of columns that this sparsity
* structure shall represent.
*/
- types::global_dof_index cols;
+ size_type cols;
/**
* For each row of the matrix, store the
*/
struct Line
{
- std::set<unsigned int> entries;
+ std::set<size_type> entries;
/**
* Constructor.
* Add the given column number to
* this line.
*/
- void add (const types::global_dof_index col_num);
+ void add (const size_type col_num);
/**
* Add the columns specified by the
inline
void
-CompressedSetSparsityPattern::Line::add (const types::global_dof_index j)
+CompressedSetSparsityPattern::Line::add (const size_type j)
{
entries.insert (j);
}
inline
-types::global_dof_index
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::n_rows () const
{
return rows;
inline
-types::global_dof_index
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::n_cols () const
{
return cols;
inline
void
-CompressedSetSparsityPattern::add (const types::global_dof_index i,
- const types::global_dof_index j)
+CompressedSetSparsityPattern::add (const size_type i,
+ const size_type j)
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
template <typename ForwardIterator>
inline
void
-CompressedSetSparsityPattern::add_entries (const types::global_dof_index row,
+CompressedSetSparsityPattern::add_entries (const size_type row,
ForwardIterator begin,
ForwardIterator end,
const bool /*indices_are_sorted*/)
inline
-unsigned int
-CompressedSetSparsityPattern::row_length (const types::global_dof_index row) const
+CompressedSetSparsityPattern::size_type
+CompressedSetSparsityPattern::row_length (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
inline
CompressedSetSparsityPattern::row_iterator
-CompressedSetSparsityPattern::row_begin (const types::global_dof_index row) const
+CompressedSetSparsityPattern::row_begin (const size_type row) const
{
return (lines[row].entries.begin ());
}
inline
CompressedSetSparsityPattern::row_iterator
-CompressedSetSparsityPattern::row_end (const types::global_dof_index row) const
+CompressedSetSparsityPattern::row_end (const size_type row) const
{
return (lines[row].entries.end ());
}
class CompressedSimpleSparsityPattern : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* An iterator that can be used to
* iterate over the elements of a single
* row. The result of dereferencing such
* an iterator is a column index.
*/
- typedef std::vector<unsigned int>::const_iterator row_iterator;
+ typedef std::vector<size_type>::const_iterator row_iterator;
/**
* Initialize the matrix empty,
* default argument keeps all
* entries.
*/
- CompressedSimpleSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
+ CompressedSimpleSparsityPattern (const size_type m,
+ const size_type n,
const IndexSet &rowset = IndexSet());
/**
* Initialize a square matrix of
* dimension @p n.
*/
- CompressedSimpleSparsityPattern (const types::global_dof_index n);
+ CompressedSimpleSparsityPattern (const size_type n);
/**
* Copy operator. For this the
* default argument keeps all
* entries.
*/
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
+ void reinit (const size_type m,
+ const size_type n,
const IndexSet &rowset = IndexSet());
/**
* this number may change as
* entries are added.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
* Add a nonzero entry to the
* matrix. If the entry already
* exists, nothing bad happens.
*/
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
+ void add (const size_type i,
+ const size_type j);
/**
* Add several nonzero entries to the
* happens.
*/
template <typename ForwardIterator>
- void add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_unique_and_sorted = false);
+ void add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_unique_and_sorted = false);
/**
* Check if a value at a certain
* position may be non-zero.
*/
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ bool exists (const size_type i,
+ const size_type j) const;
/**
* Make the sparsity pattern
* matrix, which equals the dimension
* of the image space.
*/
- types::global_dof_index n_rows () const;
+ size_type n_rows () const;
/**
* Return number of columns of this
* matrix, which equals the dimension
* of the range space.
*/
- types::global_dof_index n_cols () const;
+ size_type n_cols () const;
/**
* Number of entries in a
* index set of rows that we want
* to store.
*/
- unsigned int row_length (const types::global_dof_index row) const;
+ size_type row_length (const size_type row) const;
/**
* Access to column number field.
* Return the column number of
* the @p indexth entry in @p row.
*/
- unsigned int column_number (const types::global_dof_index row,
- const types::global_dof_index index) const;
+ size_type column_number (const size_type row,
+ const size_type index) const;
/**
* Return an iterator that can loop over
* row. Dereferencing the iterator yields
* a column index.
*/
- row_iterator row_begin (const types::global_dof_index row) const;
+ row_iterator row_begin (const size_type row) const;
/**
* Returns the end of the current row.
*/
- row_iterator row_end (const types::global_dof_index row) const;
+ row_iterator row_end (const size_type row) const;
/**
* Compute the bandwidth of the matrix
* represented by this structure. The
* $(i,j)$ represents a nonzero entry
* of the matrix.
*/
- unsigned int bandwidth () const;
+ size_type bandwidth () const;
/**
* Return the number of nonzero elements
* allocated through this sparsity pattern.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Return the IndexSet that sets which
* memory consumption (in bytes)
* of this object.
*/
- std::size_t memory_consumption () const;
+ size_type memory_consumption () const;
private:
/**
* Number of rows that this sparsity
* structure shall represent.
*/
- types::global_dof_index rows;
+ size_type rows;
/**
* Number of columns that this sparsity
* structure shall represent.
*/
- types::global_dof_index cols;
+ size_type cols;
/**
* A set that contains the valid rows.
* this row. This array is always
* kept sorted.
*/
- std::vector<unsigned int> entries;
+ std::vector<size_type> entries;
/**
* Constructor.
* Add the given column number to
* this line.
*/
- void add (const types::global_dof_index col_num);
+ void add (const size_type col_num);
/**
* Add the columns specified by the
/**
* estimates memory consumption.
*/
- std::size_t memory_consumption () const;
+ size_type memory_consumption () const;
};
inline
void
-CompressedSimpleSparsityPattern::Line::add (const types::global_dof_index j)
+CompressedSimpleSparsityPattern::Line::add (const size_type j)
{
// first check the last element (or if line
// is still empty)
// do a binary search to find the place
// where to insert:
- std::vector<unsigned int>::iterator
+ std::vector<size_type>::iterator
it = Utilities::lower_bound(entries.begin(),
entries.end(),
j);
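For reference, Line::add keeps each row's column indices in a sorted std::vector and inserts with a binary search. A minimal standalone sketch of that pattern, using std::lower_bound in place of deal.II's Utilities::lower_bound (add_sorted is a made-up name):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    typedef std::size_t size_type;

    // insert column index j into a sorted, duplicate-free vector --
    // the same sorted-vector-as-set idea Line::add relies on
    void add_sorted (std::vector<size_type> &entries, const size_type j)
    {
      std::vector<size_type>::iterator it =
        std::lower_bound (entries.begin(), entries.end(), j);
      if (it == entries.end() || *it != j)
        entries.insert (it, j);
    }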
inline
-types::global_dof_index
+size_type
CompressedSimpleSparsityPattern::n_rows () const
{
return rows;
inline
-types::global_dof_index
+size_type
CompressedSimpleSparsityPattern::n_cols () const
{
return cols;
inline
void
-CompressedSimpleSparsityPattern::add (const types::global_dof_index i,
- const types::global_dof_index j)
+CompressedSimpleSparsityPattern::add (const size_type i,
+ const size_type j)
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
if (rowset.size() > 0 && !rowset.is_element(i))
return;
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? i : rowset.index_within_set(i);
lines[rowindex].add (j);
}
template <typename ForwardIterator>
inline
void
-CompressedSimpleSparsityPattern::add_entries (const types::global_dof_index row,
+CompressedSimpleSparsityPattern::add_entries (const size_type row,
ForwardIterator begin,
ForwardIterator end,
const bool indices_are_sorted)
if (rowset.size() > 0 && !rowset.is_element(row))
return;
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? row : rowset.index_within_set(row);
lines[rowindex].add_entries (begin, end, indices_are_sorted);
}
inline
-unsigned int
-CompressedSimpleSparsityPattern::row_length (const types::global_dof_index row) const
+size_type
+CompressedSimpleSparsityPattern::row_length (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
if (rowset.size() > 0 && !rowset.is_element(row))
return 0;
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? row : rowset.index_within_set(row);
return lines[rowindex].entries.size();
}
inline
-unsigned int
-CompressedSimpleSparsityPattern::column_number (const types::global_dof_index row,
- const types::global_dof_index index) const
+size_type
+CompressedSimpleSparsityPattern::column_number (const size_type row,
+ const size_type index) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
Assert( rowset.size() == 0 || rowset.is_element(row), ExcInternalError());
- const unsigned int local_row = rowset.size() ? rowset.index_within_set(row) : row;
+ const size_type local_row = rowset.size() ? rowset.index_within_set(row) : row;
Assert (index < lines[local_row].entries.size(),
ExcIndexRange (index, 0, lines[local_row].entries.size()));
return lines[local_row].entries[index];
inline
CompressedSimpleSparsityPattern::row_iterator
-CompressedSimpleSparsityPattern::row_begin (const types::global_dof_index row) const
+CompressedSimpleSparsityPattern::row_begin (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
- const unsigned int local_row = rowset.size() ? rowset.index_within_set(row) : row;
+ const size_type local_row = rowset.size() ? rowset.index_within_set(row) : row;
return lines[local_row].entries.begin();
}
inline
CompressedSimpleSparsityPattern::row_iterator
-CompressedSimpleSparsityPattern::row_end (const types::global_dof_index row) const
+CompressedSimpleSparsityPattern::row_end (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
- const unsigned int local_row = rowset.size() ? rowset.index_within_set(row) : row;
+ const size_type local_row = rowset.size() ? rowset.index_within_set(row) : row;
return lines[local_row].entries.end();
}
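A minimal usage sketch of the interface declared above, exercising the new size_type signatures (the dimensions and indices are made up):

    CompressedSimpleSparsityPattern csp (100, 100);
    csp.add (0, 5);                                 // single entry in row 0

    std::vector<CompressedSimpleSparsityPattern::size_type> cols;
    cols.push_back (1);
    cols.push_back (7);
    csp.add_entries (2, cols.begin(), cols.end());  // several entries in row 2

    for (CompressedSimpleSparsityPattern::row_iterator it = csp.row_begin (2);
         it != csp.row_end (2); ++it)
      {
        // *it is a column index of type size_type
      }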
class CompressedSparsityPattern : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* An iterator that can be used to
* iterate over the elements of a single
* row. The result of dereferencing such
* an iterator is a column index.
*/
- typedef std::vector<unsigned int>::const_iterator row_iterator;
+ typedef std::vector<size_type>::const_iterator row_iterator;
/**
* Initialize the matrix empty,
* matrix with @p m rows and
* @p n columns.
*/
- CompressedSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n);
+ CompressedSparsityPattern (const size_type m,
+ const size_type n);
/**
* Initialize a square matrix of
* dimension @p n.
*/
- CompressedSparsityPattern (const types::global_dof_index n);
+ CompressedSparsityPattern (const size_type n);
/**
* Copy operator. For this the
* max_entries_per_row() nonzero
* entries per row.
*/
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n);
+ void reinit (const size_type m,
+ const size_type n);
/**
* Since this object is kept
* this number may change as
* entries are added.
*/
- unsigned int max_entries_per_row () const;
+ size_type max_entries_per_row () const;
/**
* Add a nonzero entry to the
* matrix. If the entry already
* exists, nothing bad happens.
*/
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
+ void add (const size_type i,
+ const size_type j);
/**
* Add several nonzero entries to the
* happens.
*/
template <typename ForwardIterator>
- void add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_unique_and_sorted = false);
+ void add_entries (const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_unique_and_sorted = false);
/**
* Check if a value at a certain
* position may be non-zero.
*/
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
+ bool exists (const size_type i,
+ const size_type j) const;
/**
* Make the sparsity pattern
* matrix, which equals the dimension
* of the image space.
*/
- types::global_dof_index n_rows () const;
+ size_type n_rows () const;
/**
* Return number of columns of this
* matrix, which equals the dimension
* of the range space.
*/
- types::global_dof_index n_cols () const;
+ size_type n_cols () const;
/**
* Number of entries in a specific row.
*/
- unsigned int row_length (const types::global_dof_index row) const;
+ size_type row_length (const size_type row) const;
/**
* Access to column number field.
* Return the column number of
* the @p indexth entry in @p row.
*/
- unsigned int column_number (const types::global_dof_index row,
- const unsigned int index) const;
+ size_type column_number (const size_type row,
+ const size_type index) const;
/**
* Return an iterator that can loop over
* row. Dereferencing the iterator yields
* a column index.
*/
- row_iterator row_begin (const types::global_dof_index row) const;
+ row_iterator row_begin (const size_type row) const;
/**
* Returns the end of the current row.
*/
- row_iterator row_end (const types::global_dof_index row) const;
+ row_iterator row_end (const size_type row) const;
/**
* Compute the bandwidth of the matrix
* $(i,j)$ represents a nonzero entry
* of the matrix.
*/
- unsigned int bandwidth () const;
+ size_type bandwidth () const;
/**
* Return the number of nonzero elements
* allocated through this sparsity
* pattern.
*/
- unsigned int n_nonzero_elements () const;
+ size_type n_nonzero_elements () const;
/**
* Return whether this object stores only
* Number of rows that this sparsity
* structure shall represent.
*/
- types::global_dof_index rows;
+ size_type rows;
/**
* Number of columns that this sparsity
* structure shall represent.
*/
- types::global_dof_index cols;
+ size_type cols;
/**
* Store some data for each row
* Add the given column number to
* this line.
*/
- void add (const types::global_dof_index col_num);
+ void add (const size_type col_num);
/**
* Add the columns specified by the
inline
void
-CompressedSparsityPattern::Line::add (const types::global_dof_index j)
+CompressedSparsityPattern::Line::add (const size_type j)
{
// first check whether this entry is
// already in the cache. if so, we can
inline
-types::global_dof_index
+size_type
CompressedSparsityPattern::n_rows () const
{
return rows;
inline
-types::global_dof_index
+size_type
CompressedSparsityPattern::n_cols () const
{
return cols;
inline
void
-CompressedSparsityPattern::add (const types::global_dof_index i,
- const types::global_dof_index j)
+CompressedSparsityPattern::add (const size_type i,
+ const size_type j)
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
template <typename ForwardIterator>
inline
void
-CompressedSparsityPattern::add_entries (const types::global_dof_index row,
+CompressedSparsityPattern::add_entries (const size_type row,
ForwardIterator begin,
ForwardIterator end,
const bool indices_are_sorted)
inline
-unsigned int
-CompressedSparsityPattern::row_length (const types::global_dof_index row) const
+size_type
+CompressedSparsityPattern::row_length (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
inline
-unsigned int
-CompressedSparsityPattern::column_number (const types::global_dof_index row,
- const unsigned int index) const
+size_type
+CompressedSparsityPattern::column_number (const size_type row,
+ const size_type index) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
Assert (index < lines[row].entries.size(),
inline
CompressedSparsityPattern::row_iterator
-CompressedSparsityPattern::row_begin (const types::global_dof_index row) const
+CompressedSparsityPattern::row_begin (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
inline
CompressedSparsityPattern::row_iterator
-CompressedSparsityPattern::row_end (const types::global_dof_index row) const
+CompressedSparsityPattern::row_end (const size_type row) const
{
Assert (row < n_rows(), ExcIndexRange (row, 0, n_rows()));
return lines[row].entries.end();
//TODO[WB]: We should have a function of the kind
-// ConstraintMatrix::add_constraint (const types::global_dof_index constrained_dof,
-// const std::vector<std::pair<types::global_dof_index, double> > &entries,
+// ConstraintMatrix::add_constraint (const size_type constrained_dof,
+// const std::vector<std::pair<size_type, double> > &entries,
// const double inhomogeneity = 0);
// rather than building up constraints piecemeal through add_line/add_entry
// etc. This would also eliminate the possibility of accidentally changing
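Until such an add_constraint() function exists, constraints are built up piecemeal exactly as this TODO describes. A short sketch with the new size_type indices, encoding x_3 = 0.5*x_1 + 0.5*x_2 + 1.0 (indices and weights made up):

    ConstraintMatrix constraints;
    const ConstraintMatrix::size_type constrained_dof = 3;

    constraints.add_line (constrained_dof);
    constraints.add_entry (constrained_dof, 1, 0.5);
    constraints.add_entry (constrained_dof, 2, 0.5);
    constraints.set_inhomogeneity (constrained_dof, 1.0);
    constraints.close ();   // finalize before use in assembly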
class ConstraintMatrix : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* An enum that describes what should
* happen if the two ConstraintMatrix
* line. Always returns true if not in
* the distributed case.
*/
- bool can_store_line (const types::global_dof_index line_index) const;
+ bool can_store_line (const size_type line_index) const;
/**
* This function copies the content of @p
* line already exists, then the function
* simply returns without doing anything.
*/
- void add_line (const types::global_dof_index line);
+ void add_line (const size_type line);
/**
* Call the first add_line() function for
* inhomogeneities using
* set_inhomogeneity().
*/
- void add_lines (const std::set<types::global_dof_index> &lines);
+ void add_lines (const std::set<size_type> &lines);
/**
* Call the first add_line() function for
* same. Thus, it does no harm to
* enter a constraint twice.
*/
- void add_entry (const types::global_dof_index line,
- const types::global_dof_index column,
+ void add_entry (const size_type line,
+ const size_type column,
const double value);
/**
* function several times, but is
* faster.
*/
- void add_entries (const types::global_dof_index line,
- const std::vector<std::pair<types::global_dof_index,double> > &col_val_pairs);
+ void add_entries (const size_type line,
+ const std::vector<std::pair<size_type,double> > &col_val_pairs);
/**
* Set an inhomogeneity to the
* @note the line needs to be added with
* one of the add_line() calls first.
*/
- void set_inhomogeneity (const types::global_dof_index line,
- const double value);
+ void set_inhomogeneity (const size_type line,
+ const double value);
/**
* Close the filling of entries. Since
* finally merge() them together
* again.
*/
- void shift (const types::global_dof_index offset);
+ void shift (const size_type offset);
/**
* Clear all entries of this
* Return number of constraints stored in
* this matrix.
*/
- unsigned int n_constraints () const;
+ size_type n_constraints () const;
/**
* Return whether the degree of freedom
* called, we have to perform a linear
* search through all entries.
*/
- bool is_constrained (const types::global_dof_index index) const;
+ bool is_constrained (const size_type index) const;
/**
* Return whether the dof is
* freedom but with a weight different
* from one.
*/
- bool is_identity_constrained (const types::global_dof_index index) const;
+ bool is_identity_constrained (const size_type index) const;
/**
* Return the maximum number of other
* constrained node are indirected to
* the nodes it is constrained to.
*/
- unsigned int max_constraint_indirections () const;
+ size_type max_constraint_indirections () const;
/**
* Returns <tt>true</tt> in case the
* non-trivial inhomogeneous values set
* to the dof.
*/
- bool is_inhomogeneously_constrained (const types::global_dof_index index) const;
+ bool is_inhomogeneously_constrained (const size_type index) const;
/**
* Returns <tt>false</tt> if all
* zero pointer in case the dof is not
* constrained.
*/
- const std::vector<std::pair<types::global_dof_index,double> > *
- get_constraint_entries (const types::global_dof_index line) const;
+ const std::vector<std::pair<size_type,double> > *
+ get_constraint_entries (const size_type line) const;
/**
* Returns the value of the inhomogeneity
* line. Unconstrained dofs also return a
* zero value.
*/
- double get_inhomogeneity (const types::global_dof_index line) const;
+ double get_inhomogeneity (const size_type line) const;
/**
* Print the constraint lines. Mainly
* consumption (in bytes) of this
* object.
*/
- std::size_t memory_consumption () const;
+ size_type memory_consumption () const;
/**
* @}
*/
template <class InVector, class OutVector>
void
- distribute_local_to_global (const InVector &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- OutVector &global_vector) const;
+ distribute_local_to_global (const InVector &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ OutVector &global_vector) const;
/**
* This function takes a vector of
*/
template <typename VectorType>
void
- distribute_local_to_global (const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- VectorType &global_vector,
- const FullMatrix<double> &local_matrix) const;
+ distribute_local_to_global (const Vector<double> &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ VectorType &global_vector,
+ const FullMatrix<double> &local_matrix) const;
/**
* Enter a single value into a
*/
template <class VectorType>
void
- distribute_local_to_global (const unsigned int index,
- const double value,
- VectorType &global_vector) const;
+ distribute_local_to_global (const size_type index,
+ const double value,
+ VectorType &global_vector) const;
/**
* This function takes a pointer to a
*/
template <typename MatrixType>
void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix) const;
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix) const;
/**
* Does the same as the function
*/
template <typename MatrixType>
void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices,
- MatrixType &global_matrix) const;
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
+ MatrixType &global_matrix) const;
/**
* This function simultaneously
*/
template <typename MatrixType, typename VectorType>
void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs = false) const;
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs = false) const;
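A hedged sketch of how this combined matrix/vector overload is typically driven from an assembly loop; constraints, cell_matrix, cell_rhs, local_dof_indices, system_matrix and system_rhs are assumed to be set up elsewhere in the usual way:

    // inside the loop over cells, once the local contributions are computed:
    constraints.distribute_local_to_global (cell_matrix,
                                            cell_rhs,
                                            local_dof_indices,
                                            system_matrix,
                                            system_rhs);
    // constrained rows and columns are resolved on the fly, so no separate
    // condense() step on the global objects is needed afterwards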
/**
* Do a similar operation as the
*/
template <typename SparsityType>
void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries = true,
+ add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries = true,
const Table<2,bool> &dof_mask = default_empty_table) const;
/**
template <typename SparsityType>
void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries = true,
- const Table<2,bool> &dof_mask = default_empty_table) const;
+ add_entries_local_to_global (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries = true,
+ const Table<2,bool> &dof_mask = default_empty_table) const;
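The sparsity analogue follows the same calling pattern; a minimal sketch (n_dofs and the per-cell loop are assumed):

    CompressedSimpleSparsityPattern csp (n_dofs, n_dofs);
    // on each cell:
    constraints.add_entries_local_to_global (local_dof_indices,
                                             csp,
                                             /*keep_constrained_entries=*/true);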
/**
* This function imports values from a
* @ingroup Exceptions
*/
DeclException1 (ExcLineInexistant,
- types::global_dof_index,
+ size_type,
<< "The specified line " << arg1
<< " does not exist.");
/**
* of entries that make up the homogeneous
* part of a constraint.
*/
- typedef std::vector<std::pair<types::global_dof_index,double> > Entries;
+ typedef std::vector<std::pair<size_type,double> > Entries;
/**
* Number of this line. Since only
* and have to store the line
* number explicitly.
*/
- types::global_dof_index line;
+ size_type line;
/**
* Row numbers and values of the
std::vector<ConstraintLine> lines;
/**
- * A list of unsigned integers that
+ * A list of size_type indices that
* contains the position of the
* ConstraintLine of a constrained degree
* of freedom, or
* contributions into vectors and
* matrices.
*/
- std::vector<types::global_dof_index> lines_cache;
+ std::vector<size_type> lines_cache;
/**
* This IndexSet is used to limit the
* index of line @p line in the vector
* lines_cache using local_lines.
*/
- unsigned int calculate_line_index (const types::global_dof_index line) const;
+ size_type calculate_line_index (const size_type line) const;
/**
* Return @p true if the weight of an
* used to delete entries with zero
* weight.
*/
- static bool check_zero_weight (const std::pair<types::global_dof_index, double> &p);
+ static bool check_zero_weight (const std::pair<size_type, double> &p);
/**
* Dummy table that serves as default
*/
template <typename MatrixType, typename VectorType>
void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
internal::bool2type<false>) const;
/**
*/
template <typename MatrixType, typename VectorType>
void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
internal::bool2type<true>) const;
/**
*/
template <typename SparsityType>
void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
+ add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
internal::bool2type<false>) const;
/**
*/
template <typename SparsityType>
void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
+ add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
internal::bool2type<true>) const;
/**
* row indices.
*/
void
- make_sorted_row_list (const std::vector<types::global_dof_index> &local_dof_indices,
+ make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
internals::GlobalRowsFromLocal &global_rows) const;
/**
* function.
*/
void
- make_sorted_row_list (const std::vector<types::global_dof_index> &local_dof_indices,
- std::vector<types::global_dof_index> &active_dofs) const;
+ make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
+ std::vector<size_type> &active_dofs) const;
/**
* Internal helper function for
* distribute_local_to_global function.
*/
double
- resolve_vector_entry (const types::global_dof_index i,
+ resolve_vector_entry (const size_type i,
const internals::GlobalRowsFromLocal &global_rows,
const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
+ const std::vector<size_type> &local_dof_indices,
const FullMatrix<double> &local_matrix) const;
};
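Once the object is closed, the query functions declared above combine as in this sketch (the dof index is made up):

    const ConstraintMatrix::size_type dof = 3;
    if (constraints.is_constrained (dof))
      {
        const std::vector<std::pair<ConstraintMatrix::size_type,double> >
          *entries = constraints.get_constraint_entries (dof);
        const double b = constraints.get_inhomogeneity (dof);
        // x_dof = sum_q (*entries)[q].second * x_[(*entries)[q].first] + b
      }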
inline
void
-ConstraintMatrix::add_line (const types::global_dof_index line)
+ConstraintMatrix::add_line (const size_type line)
{
Assert (sorted==false, ExcMatrixIsClosed());
// :-)
- Assert (line != numbers::invalid_unsigned_int,
+ Assert (line != numbers::invalid_size_type,
ExcInternalError());
- const unsigned int line_index = calculate_line_index (line);
+ const size_type line_index = calculate_line_index (line);
// check whether line already exists; it
// may, in which case we can just quit
// if necessary enlarge vector of
// existing entries for cache
if (line_index >= lines_cache.size())
- lines_cache.resize (std::max(2*static_cast<unsigned int>(lines_cache.size()),
+ lines_cache.resize (std::max(2*static_cast<size_type>(lines_cache.size()),
line_index+1),
- numbers::invalid_unsigned_int);
+ numbers::invalid_size_type);
inline
void
-ConstraintMatrix::add_entry (const types::global_dof_index line,
- const types::global_dof_index column,
- const double value)
+ConstraintMatrix::add_entry (const size_type line,
+ const size_type column,
+ const double value)
{
Assert (sorted==false, ExcMatrixIsClosed());
Assert (line != column,
inline
void
-ConstraintMatrix::set_inhomogeneity (const types::global_dof_index line,
- const double value)
+ConstraintMatrix::set_inhomogeneity (const size_type line,
+ const double value)
{
- const unsigned int line_index = calculate_line_index(line);
+ const size_type line_index = calculate_line_index(line);
Assert( line_index < lines_cache.size() &&
- lines_cache[line_index] != numbers::invalid_unsigned_int,
+ lines_cache[line_index] != numbers::invalid_size_type,
ExcMessage("call add_line() before calling set_inhomogeneity()"));
inline
-unsigned int
+size_type
ConstraintMatrix::n_constraints () const
{
return lines.size();
inline
bool
-ConstraintMatrix::is_constrained (const types::global_dof_index index) const
+ConstraintMatrix::is_constrained (const size_type index) const
{
- const unsigned int line_index = calculate_line_index(index);
+ const size_type line_index = calculate_line_index(index);
return ((line_index < lines_cache.size())
&&
- (lines_cache[line_index] != numbers::invalid_unsigned_int));
+ (lines_cache[line_index] != numbers::invalid_size_type));
inline
bool
-ConstraintMatrix::is_inhomogeneously_constrained (const types::global_dof_index index) const
+ConstraintMatrix::is_inhomogeneously_constrained (const size_type index) const
{
// check whether the entry is
// constrained. could use is_constrained, but
// that means computing the line index twice
- const unsigned int line_index = calculate_line_index(index);
+ const size_type line_index = calculate_line_index(index);
if (line_index >= lines_cache.size() ||
- lines_cache[line_index] == numbers::invalid_unsigned_int)
+ lines_cache[line_index] == numbers::invalid_size_type)
return false;
inline
-const std::vector<std::pair<types::global_dof_index,double> > *
-ConstraintMatrix::get_constraint_entries (const types::global_dof_index line) const
+const std::vector<std::pair<size_type,double> > *
+ConstraintMatrix::get_constraint_entries (const size_type line) const
{
// check whether the entry is
// constrained. could use is_constrained, but
// that means computing the line index twice
- const unsigned int line_index = calculate_line_index(line);
+ const size_type line_index = calculate_line_index(line);
if (line_index >= lines_cache.size() ||
- lines_cache[line_index] == numbers::invalid_unsigned_int)
+ lines_cache[line_index] == numbers::invalid_size_type)
return 0;
inline
double
-ConstraintMatrix::get_inhomogeneity (const types::global_dof_index line) const
+ConstraintMatrix::get_inhomogeneity (const size_type line) const
{
// check whether the entry is
// constrained. could use is_constrained, but
// that means computing the line index twice
- const unsigned int line_index = calculate_line_index(line);
+ const size_type line_index = calculate_line_index(line);
if (line_index >= lines_cache.size() ||
- lines_cache[line_index] == numbers::invalid_unsigned_int)
+ lines_cache[line_index] == numbers::invalid_size_type)
return 0;
-inline unsigned int
-ConstraintMatrix::calculate_line_index (const types::global_dof_index line) const
+inline size_type
+ConstraintMatrix::calculate_line_index (const size_type line) const
{
//IndexSet is unused (serial case)
if (!local_lines.size())
inline bool
-ConstraintMatrix::can_store_line (types::global_dof_index line_index) const
+ConstraintMatrix::can_store_line (size_type line_index) const
{
return !local_lines.size() || local_lines.is_element(line_index);
}
template <class VectorType>
inline
void ConstraintMatrix::distribute_local_to_global (
- const types::global_dof_index index,
- const double value,
- VectorType &global_vector) const
+ const size_type index,
+ const double value,
+ VectorType &global_vector) const
{
Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
{
const ConstraintLine &position =
lines[lines_cache[calculate_line_index(index)]];
- for (unsigned int j=0; j<position.entries.size(); ++j)
+ for (size_type j=0; j<position.entries.size(); ++j)
global_vector(position.entries[j].first)
+= value * position.entries[j].second;
}
{
const ConstraintLine &position =
lines[lines_cache[calculate_line_index(*local_indices_begin)]];
- for (unsigned int j=0; j<position.entries.size(); ++j)
+ for (size_type j=0; j<position.entries.size(); ++j)
global_vector(position.entries[j].first)
+= *local_vector_begin * position.entries[j].second;
}
inline
void
ConstraintMatrix::distribute_local_to_global (
- const InVector &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- OutVector &global_vector) const
+ const InVector &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ OutVector &global_vector) const
{
Assert (local_vector.size() == local_dof_indices.size(),
ExcDimensionMismatch(local_vector.size(), local_dof_indices.size()));
const ConstraintLine &position =
lines[lines_cache[calculate_line_index(*local_indices_begin)]];
typename VectorType::value_type value = position.inhomogeneity;
- for (unsigned int j=0; j<position.entries.size(); ++j)
+ for (size_type j=0; j<position.entries.size(); ++j)
value += (global_vector(position.entries[j].first) *
position.entries[j].second);
*local_vector_begin = value;
inline
void
ConstraintMatrix::
-distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix) const
+distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix) const
{
// create a dummy and hand on to the
// function actually implementing this
inline
void
ConstraintMatrix::
-distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs) const
+distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs) const
{
// enter the internal function with the
// respective block information set, the
inline
void
ConstraintMatrix::
-add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask) const
+add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask) const
{
// enter the internal function with the
// respective block information set, the
new_line.reserve (uncondensed.size());
std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- unsigned int shift = 0;
- unsigned int n_rows = uncondensed.size();
+ size_type shift = 0;
+ size_type n_rows = uncondensed.size();
if (next_constraint == lines.end())
// if no constraint is to be handled
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
new_line.push_back (row);
else
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
if (row == next_constraint->line)
{
// this line is constrained
// nothing more to do; finish rest
// of loop
{
- for (unsigned int i=row+1; i<n_rows; ++i)
+ for (size_type i=row+1; i<n_rows; ++i)
new_line.push_back (i-shift);
break;
};
// only evaluated as often as there are
// entries in new_line[*] which tells us
// which constraints exist
- for (unsigned int row=0; row<uncondensed.size(); ++row)
+ for (size_type row=0; row<uncondensed.size(); ++row)
if (new_line[row] != -1)
// line not constrained
// copy entry
else
// line must be distributed
{
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed(new_line[next_constraint->entries[q].first])
+=
uncondensed(row) * next_constraint->entries[q].second;
"without any matrix specified."));
const typename VectorType::value_type old_value = vec(constraint_line->line);
- for (unsigned int q=0; q!=constraint_line->entries.size(); ++q)
+ for (size_type q=0; q!=constraint_line->entries.size(); ++q)
if (vec.in_local_range(constraint_line->entries[q].first) == true)
vec(constraint_line->entries[q].first)
+= (static_cast<typename VectorType::value_type>
new_line.reserve (uncondensed_struct.n_rows());
std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- unsigned int shift = 0;
- const unsigned int n_rows = uncondensed_struct.n_rows();
+ size_type shift = 0;
+ const size_type n_rows = uncondensed_struct.n_rows();
if (next_constraint == lines.end())
// if no constraint is to be handled
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
new_line.push_back (row);
else
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
if (row == next_constraint->line)
{
// this line is constrained
// nothing more to do; finish rest
// of loop
{
- for (unsigned int i=row+1; i<n_rows; ++i)
+ for (size_type i=row+1; i<n_rows; ++i)
new_line.push_back (i-shift);
break;
};
// only evaluated as often as there are
// entries in new_line[*] which tells us
// which constraints exist
- for (unsigned int row=0; row<uncondensed_struct.n_rows(); ++row)
+ for (size_type row=0; row<uncondensed_struct.n_rows(); ++row)
if (new_line[row] != -1)
{
// line not constrained
while (c->line != p->column())
++c;
- for (unsigned int q=0; q!=c->entries.size(); ++q)
+ for (size_type q=0; q!=c->entries.size(); ++q)
// distribute to rows with
// appropriate weight
condensed.add (new_line[row], new_line[c->entries[q].first],
// for each column: distribute
if (new_line[p->column()] != -1)
// column is not constrained
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed.add (new_line[next_constraint->entries[q].first],
new_line[p->column()],
p->value() *
while (c->line != p->column())
++c;
- for (unsigned int r=0; r!=c->entries.size(); ++r)
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type r=0; r!=c->entries.size(); ++r)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed.add (new_line[next_constraint->entries[q].first],
new_line[c->entries[r].first],
p->value() *
c->entries[r].second);
if (use_vectors == true)
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed_vector (new_line[next_constraint->entries[q].first])
-= p->value() *
next_constraint->entries[q].second *
// condense the vector
if (use_vectors == true)
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed_vector(new_line[next_constraint->entries[q].first])
+=
uncondensed_vector(row) * next_constraint->entries[q].second;
AssertDimension (vec.size(), sparsity.n_rows());
double average_diagonal = 0;
- for (unsigned int i=0; i<uncondensed.m(); ++i)
+ for (size_type i=0; i<uncondensed.m(); ++i)
average_diagonal += std::fabs (uncondensed.diag_element(i));
average_diagonal /= uncondensed.m();
// store for each index whether it must be
// distributed or not. If entry is
- // invalid_unsigned_int, no distribution is
+ // invalid_size_type, no distribution is
// necessary. otherwise, the number states
// which line in the constraint matrix
// handles this index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over cols
{
for (typename SparseMatrix<number>::iterator
entry = uncondensed.begin(row);
entry != uncondensed.end(row); ++entry)
{
- const types::global_dof_index column = entry->column();
+ const size_type column = entry->column();
// end of row reached?
// this should not
Assert (column != SparsityPattern::invalid_entry,
ExcMatrixNotClosed());
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
// distribute entry at
// regular row @p row
// and irregular column
// set old entry to
// zero
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size(); ++q)
uncondensed.add (row,
lines[distribute[column]].entries[q].first,
entry = uncondensed.begin(row);
entry != uncondensed.end(row); ++entry)
{
- const types::global_dof_index column = entry->column();
+ const size_type column = entry->column();
// end of row reached?
// this should not
Assert (column != SparsityPattern::invalid_entry,
ExcMatrixNotClosed());
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
// distribute entry at
// irregular row
// @p row and regular
// column. set
// old entry to zero
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
uncondensed.add (lines[distribute[row]].entries[q].first,
column,
// to one on main
// diagonal, zero otherwise
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size(); ++q)
uncondensed.add (lines[distribute[row]].entries[p].first,
lines[distribute[column]].entries[q].first,
// take care of vector
if (use_vectors == true)
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
vec(lines[distribute[row]].entries[q].first)
+= (vec(row) * lines[distribute[row]].entries[q].second);
// the other function above.
const bool use_vectors = vec.n_blocks() == 0 ? false : true;
- const unsigned int blocks = uncondensed.n_block_rows();
+ const size_type blocks = uncondensed.n_block_rows();
const BlockSparsityPattern &
sparsity = uncondensed.get_sparsity_pattern ();
}
double average_diagonal = 0;
- for (unsigned int b=0; b<uncondensed.n_block_rows(); ++b)
- for (unsigned int i=0; i<uncondensed.block(b,b).m(); ++i)
+ for (size_type b=0; b<uncondensed.n_block_rows(); ++b)
+ for (size_type i=0; i<uncondensed.block(b,b).m(); ++i)
average_diagonal += std::fabs (uncondensed.block(b,b).diag_element(i));
average_diagonal /= uncondensed.m();
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
// get index of this row
// within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
+ const size_type block_row = block_index.first;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// all columns and see
// whether this column must
// this blockrow and the
// corresponding row
// therein
- for (unsigned int block_col=0; block_col<blocks; ++block_col)
+ for (size_type block_col=0; block_col<blocks; ++block_col)
{
for (typename SparseMatrix<number>::iterator
entry = uncondensed.block(block_row, block_col).begin(block_index.second);
entry != uncondensed.block(block_row, block_col).end(block_index.second);
++entry)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col,entry->column());
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
// distribute entry at
// regular row @p row
// and irregular column
{
const double old_value = entry->value ();
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]].entries.size(); ++q)
uncondensed.add (row,
lines[distribute[global_col]].entries[q].first,
// whole row into the
// chunks defined by the
// blocks
- for (unsigned int block_col=0; block_col<blocks; ++block_col)
+ for (size_type block_col=0; block_col<blocks; ++block_col)
{
for (typename SparseMatrix<number>::iterator
entry = uncondensed.block(block_row, block_col).begin(block_index.second);
entry != uncondensed.block(block_row, block_col).end(block_index.second);
++entry)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col, entry->column());
if (distribute[global_col] ==
- numbers::invalid_unsigned_int)
+ numbers::invalid_size_type)
// distribute
// entry at
// irregular
{
const double old_value = entry->value();
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
uncondensed.add (lines[distribute[row]].entries[q].first,
global_col,
{
const double old_value = entry->value ();
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
{
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
uncondensed.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first,
old_value *
// take care of vector
if (use_vectors == true)
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
vec(lines[distribute[row]].entries[q].first)
+= (vec(row) * lines[distribute[row]].entries[q].second);
// TODO: in general we should iterate over the constraints and not over all DoFs
// for performance reasons
template<class VEC>
- void set_zero_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, unsigned int shift = 0)
+ void set_zero_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, size_type shift = 0)
{
Assert(!vec.has_ghost_elements(), ExcInternalError());//ExcGhostsPresent());
- const unsigned int
+ const size_type
start = vec.local_range().first,
end = vec.local_range().second;
- for (unsigned int i=start; i<end; ++i)
+ for (size_type i=start; i<end; ++i)
if (cm.is_constrained (shift + i))
vec(i) = 0;
}
template<class VEC>
void set_zero_in_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, internal::bool2type<true>)
{
- unsigned int start_shift = 0;
- for (unsigned int j=0; j<vec.n_blocks(); ++j)
+ size_type start_shift = 0;
+ for (size_type j=0; j<vec.n_blocks(); ++j)
{
set_zero_parallel(cm, vec.block(j), start_shift);
start_shift += vec.block(j).size();
/* std::vector<dealii::ConstraintMatrix::ConstraintLine>::const_iterator constraint_line = cm.lines.begin();
for (; constraint_line!=cm.lines.end(); ++constraint_line)
vec(constraint_line->line) = 0.;*/
- for (unsigned int i=0; i<vec.size(); ++i)
+ for (size_type i=0; i<vec.size(); ++i)
if (cm.is_constrained (i))
vec(i) = 0;
}
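The serial and parallel code paths above are chosen at compile time through internal::bool2type tag dispatch. A self-contained sketch of the idiom (the trait and function names here are stand-ins, not deal.II's actual internals):

    template <bool B> struct bool2type {};

    // stand-in trait: true for block vectors, false otherwise
    template <class VEC> struct is_block_vector
    {
      static const bool value = false;
    };

    template <class VEC>
    void set_zero_impl (VEC &vec, bool2type<false>) { /* plain-vector path */ }

    template <class VEC>
    void set_zero_impl (VEC &vec, bool2type<true>)  { /* block-vector path */ }

    template <class VEC>
    void set_zero (VEC &vec)
    {
      // the trait's value selects the overload at compile time
      set_zero_impl (vec, bool2type<is_block_vector<VEC>::value> ());
    }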
void
ConstraintMatrix::
distribute_local_to_global (const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
+ const std::vector<size_type> &local_dof_indices,
VectorType &global_vector,
const FullMatrix<double> &local_matrix) const
{
AssertDimension (local_matrix.m(), local_dof_indices.size());
AssertDimension (local_matrix.n(), local_dof_indices.size());
- const unsigned int n_local_dofs = local_vector.size();
+ const size_type n_local_dofs = local_vector.size();
if (lines.empty())
global_vector.add(local_dof_indices, local_vector);
else
- for (unsigned int i=0; i<n_local_dofs; ++i)
+ for (size_type i=0; i<n_local_dofs; ++i)
{
// check whether the current index is
// constrained. if not, just write the entry
// find the constraint line to the given
// global dof index
- const unsigned int line_index = calculate_line_index (local_dof_indices[i]);
+ const size_type line_index = calculate_line_index (local_dof_indices[i]);
const ConstraintLine *position =
lines_cache.size() <= line_index ? 0 : &lines[lines_cache[line_index]];
// constrained. If so, distribute the constraint
const double val = position->inhomogeneity;
if (val != 0)
- for (unsigned int j=0; j<n_local_dofs; ++j)
+ for (size_type j=0; j<n_local_dofs; ++j)
if (is_constrained(local_dof_indices[j]) == false)
global_vector(local_dof_indices[j]) -= val * local_matrix(j,i);
else
const ConstraintLine &position_j =
lines[lines_cache[calculate_line_index(local_dof_indices[j])]];
- for (unsigned int q=0; q<position_j.entries.size(); ++q)
+ for (size_type q=0; q<position_j.entries.size(); ++q)
{
Assert (!(!local_lines.size()
|| local_lines.is_element(position_j.entries[q].first))
// now distribute the constraint,
// but make sure we don't touch
// the entries of fixed dofs
- for (unsigned int j=0; j<position->entries.size(); ++j)
+ for (size_type j=0; j<position->entries.size(); ++j)
{
Assert (!(!local_lines.size()
|| local_lines.is_element(position->entries[j].first))
old_line.reserve (uncondensed.size());
std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- unsigned int shift = 0;
- unsigned int n_rows = uncondensed.size();
+ size_type shift = 0;
+ size_type n_rows = uncondensed.size();
if (next_constraint == lines.end())
// if no constraint is to be handled
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
old_line.push_back (row);
else
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
if (row == next_constraint->line)
{
// this line is constrained
// nothing more to do; finish rest
// of loop
{
- for (unsigned int i=row+1; i<n_rows; ++i)
+ for (size_type i=row+1; i<n_rows; ++i)
old_line.push_back (i-shift);
break;
};
// only evaluated as often as there are
// entries in new_line[*] which tells us
// which constraints exist
- for (unsigned int line=0; line<uncondensed.size(); ++line)
+ for (size_type line=0; line<uncondensed.size(); ++line)
if (old_line[line] != -1)
// line was not condensed away
uncondensed(line) = condensed(old_line[line]);
uncondensed(line) = next_constraint->inhomogeneity;
// then add the different
// contributions
- for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+ for (size_type i=0; i<next_constraint->entries.size(); ++i)
uncondensed(line) += (condensed(old_line[next_constraint->entries[i].first]) *
next_constraint->entries[i].second);
++next_constraint;
// different contributions
typename VectorType::value_type
new_value = next_constraint->inhomogeneity;
- for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+ for (size_type i=0; i<next_constraint->entries.size(); ++i)
new_value += (static_cast<typename VectorType::value_type>
(vec(next_constraint->entries[i].first)) *
next_constraint->entries[i].second);
// comes from a constraint.
struct Distributing
{
- Distributing (const unsigned int global_row = numbers::invalid_unsigned_int,
- const unsigned int local_row = numbers::invalid_unsigned_int);
+ Distributing (const size_type global_row = numbers::invalid_size_type,
+ const size_type local_row = numbers::invalid_size_type);
Distributing (const Distributing &in);
Distributing &operator = (const Distributing &in);
bool operator < (const Distributing &in) const
return global_row<in.global_row;
};
- unsigned int global_row;
- unsigned int local_row;
- mutable unsigned int constraint_position;
+ size_type global_row;
+ size_type local_row;
+ mutable size_type constraint_position;
};
inline
- Distributing::Distributing (const unsigned int global_row,
- const unsigned int local_row) :
+ Distributing::Distributing (const size_type global_row,
+ const size_type local_row) :
global_row (global_row),
local_row (local_row),
- constraint_position (numbers::invalid_unsigned_int) {}
+ constraint_position (numbers::invalid_size_type) {}
inline
Distributing::Distributing (const Distributing &in)
:
- constraint_position (numbers::invalid_unsigned_int)
+ constraint_position (numbers::invalid_size_type)
{
*this = (in);
}
local_row = in.local_row;
// the constraints pointer should not
// contain any data here.
- Assert (constraint_position == numbers::invalid_unsigned_int,
+ Assert (constraint_position == numbers::invalid_size_type,
ExcInternalError());
- if (in.constraint_position != numbers::invalid_unsigned_int)
+ if (in.constraint_position != numbers::invalid_size_type)
{
constraint_position = in.constraint_position;
- in.constraint_position = numbers::invalid_unsigned_int;
+ in.constraint_position = numbers::invalid_size_type;
}
return *this;
}
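Note the transfer-on-copy semantics implemented above: copying a Distributing hands constraint_position over to the destination and resets the source to invalid_size_type, much like the old std::auto_ptr. A tiny sketch of the observable effect (values made up):

    Distributing a (/*global_row=*/10, /*local_row=*/0);
    a.constraint_position = 3;

    Distributing b (a);
    // now b.constraint_position == 3, while a.constraint_position has
    // been reset to numbers::invalid_size_type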
{
Assert (element_size == 0, ExcInternalError());
element_size = 6;
- data = new std::pair<unsigned int,double> [20*6];
+ data = new std::pair<size_type,double> [20*6];
individual_size.resize(20);
n_used_elements = 0;
}
- unsigned int element_size;
+ size_type element_size;
- std::pair<unsigned int,double> *data;
+ std::pair<size_type,double> *data;
- std::vector<unsigned int> individual_size;
+ std::vector<size_type> individual_size;
- unsigned int n_used_elements;
+ size_type n_used_elements;
- unsigned int insert_new_index (const std::pair<unsigned int,double> &pair)
+ size_type insert_new_index (const std::pair<size_type,double> &pair)
{
if (element_size == 0)
reinit();
if (n_used_elements == individual_size.size())
{
- std::pair<unsigned int,double> *new_data =
- new std::pair<unsigned int,double> [2*individual_size.size()*element_size];
+ std::pair<size_type,double> *new_data =
+ new std::pair<size_type,double> [2*individual_size.size()*element_size];
memcpy (new_data, data, individual_size.size()*element_size*
- sizeof(std::pair<unsigned int,double>));
+ sizeof(std::pair<size_type,double>));
delete [] data;
data = new_data;
individual_size.resize (2*individual_size.size(), 0);
}
- unsigned int index = n_used_elements;
+ size_type index = n_used_elements;
data[index*element_size] = pair;
individual_size[index] = 1;
++n_used_elements;
return index;
}
- void append_index (const unsigned int index,
- const std::pair<unsigned int,double> &pair)
+ void append_index (const size_type index,
+ const std::pair<size_type,double> &pair)
{
AssertIndexRange (index, n_used_elements);
- const unsigned int my_size = individual_size[index];
+ const size_type my_size = individual_size[index];
if (my_size == element_size)
{
- std::pair<unsigned int,double> *new_data =
- new std::pair<unsigned int,double> [2*individual_size.size()*element_size];
- for (unsigned int i=0; i<n_used_elements; ++i)
+ std::pair<size_type,double> *new_data =
+ new std::pair<size_type,double> [2*individual_size.size()*element_size];
+ for (size_type i=0; i<n_used_elements; ++i)
memcpy (&new_data[i*element_size*2], &data[i*element_size],
- element_size*sizeof(std::pair<unsigned int,double>));
+ element_size*sizeof(std::pair<size_type,double>));
delete [] data;
data = new_data;
element_size *= 2;
individual_size[index]++;
}
- unsigned int
- get_size (const unsigned int index) const
+ size_type
+ get_size (const size_type index) const
{
return individual_size[index];
}
- const std::pair<unsigned int,double> *
- get_entry (const unsigned int index) const
+ const std::pair<size_type,double> *
+ get_entry (const size_type index) const
{
return &data[index*element_size];
}
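The cache structure above (its class name falls outside this hunk; call it DataCache in the sketch) is a simple pool: every row owns a fixed-size chunk of element_size pairs, and both the chunk size and the number of chunks double on overflow. A hedged sketch of how the operations combine (values made up):

    DataCache cache;                                   // assumed: element_size starts at 0
    const std::pair<size_type,double> a (4, 0.5);
    const std::pair<size_type,double> b (7, 0.5);

    const size_type row = cache.insert_new_index (a);  // new row holding one pair
    cache.append_index (row, b);                       // row now holds two pairs

    // cache.get_size (row) == 2
    // cache.get_entry (row)[1].first == 7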
class GlobalRowsFromLocal
{
public:
- GlobalRowsFromLocal (const unsigned int n_local_rows)
+ GlobalRowsFromLocal (const size_type n_local_rows)
:
total_row_indices (n_local_rows),
n_active_rows (n_local_rows),
// implemented below
- void insert_index (const unsigned int global_row,
- const unsigned int local_row,
+ void insert_index (const size_type global_row,
+ const size_type local_row,
const double constraint_value);
void sort ();
<< "Constr rows " << n_constraints() << std::endl
<< "Inhom rows " << n_inhomogeneous_rows << std::endl
<< "Local: ";
- for (unsigned int i=0 ; i<total_row_indices.size() ; ++i)
+ for (size_type i=0 ; i<total_row_indices.size() ; ++i)
os << ' ' << std::setw(4) << total_row_indices[i].local_row;
os << std::endl
<< "Global:";
- for (unsigned int i=0 ; i<total_row_indices.size() ; ++i)
+ for (size_type i=0 ; i<total_row_indices.size() ; ++i)
os << ' ' << std::setw(4) << total_row_indices[i].global_row;
os << std::endl
<< "ConPos:";
- for (unsigned int i=0 ; i<total_row_indices.size() ; ++i)
+ for (size_type i=0 ; i<total_row_indices.size() ; ++i)
os << ' ' << std::setw(4) << total_row_indices[i].constraint_position;
os << std::endl;
}
// returns the number of global indices in the
// struct
- unsigned int size () const
+ size_type size () const
{
return n_active_rows;
}
// returns the global index of the
// counter_index-th entry in the list
- unsigned int &global_row (const unsigned int counter_index)
+ size_type &global_row (const size_type counter_index)
{
return total_row_indices[counter_index].global_row;
}
// returns the number of constraints that are
// associated to the counter_index-th entry in
// the list
- unsigned int size (const unsigned int counter_index) const
+ size_type size (const size_type counter_index) const
{
return (total_row_indices[counter_index].constraint_position ==
- numbers::invalid_unsigned_int ?
+ numbers::invalid_size_type ?
0 :
data_cache.get_size(total_row_indices[counter_index].
constraint_position));
// returns the global row associated with the
// counter_index-th entry in the list
- const unsigned int &global_row (const unsigned int counter_index) const
+ const size_type &global_row (const size_type counter_index) const
{
return total_row_indices[counter_index].global_row;
}
// returns the local row in the cell matrix
// associated with the counter_index-th entry
- // in the list. Returns invalid_unsigned_int
- // for invalid unsigned ints
- const unsigned int &local_row (const unsigned int counter_index) const
+ // in the list. Returns invalid_size_type
+ // for entries without a valid local row
+ const size_type &local_row (const size_type counter_index) const
{
return total_row_indices[counter_index].local_row;
}
// writable index
- unsigned int &local_row (const unsigned int counter_index)
+ size_type &local_row (const size_type counter_index)
{
return total_row_indices[counter_index].local_row;
}
// associated with the counter_index-th entry
// in the list in the index_in_constraint-th
// position of constraints
- unsigned int local_row (const unsigned int counter_index,
- const unsigned int index_in_constraint) const
+ size_type local_row (const size_type counter_index,
+ const size_type index_in_constraint) const
{
return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
[index_in_constraint]).first;
// counter_index-th entry in the list in the
// index_in_constraint-th position of
// constraints
- double constraint_value (const unsigned int counter_index,
- const unsigned int index_in_constraint) const
+ double constraint_value (const size_type counter_index,
+ const size_type index_in_constraint) const
{
return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
[index_in_constraint]).second;
// constrained. This means that
// there is one less nontrivial
// row
- void insert_constraint (const unsigned int constrained_local_dof)
+ void insert_constraint (const size_type constrained_local_dof)
{
--n_active_rows;
total_row_indices[n_active_rows].local_row = constrained_local_dof;
// the matrix, but are needed in order
// to set matrix diagonals and resolve
// inhomogeneities
- unsigned int n_constraints () const
+ size_type n_constraints () const
{
return total_row_indices.size()-n_active_rows;
}
// returns the number of constrained
// dofs in the structure that have an
// inhomogeneity
- unsigned int n_inhomogeneities () const
+ size_type n_inhomogeneities () const
{
return n_inhomogeneous_rows;
}
// sides, so to have fast access to
// them, put them before homogeneous
// constraints
- void set_ith_constraint_inhomogeneous (const unsigned int i)
+ void set_ith_constraint_inhomogeneous (const size_type i)
{
Assert (i >= n_inhomogeneous_rows, ExcInternalError());
std::swap (total_row_indices[n_active_rows+i],
// easily when the
// GlobalRowsFromLocal has been
// set up
- unsigned int constraint_origin (unsigned int i) const
+ size_type constraint_origin (size_type i) const
{
return total_row_indices[n_active_rows+i].local_row;
}
// how many rows there are,
// constraints disregarded
- unsigned int n_active_rows;
+ size_type n_active_rows;
// the number of rows with
// inhomogeneous constraints
- unsigned int n_inhomogeneous_rows;
+ size_type n_inhomogeneous_rows;
};
// a function that appends an additional
// row to the list of values, or appends a
// value to an already existing
// row. Similar functionality to
- // std::map<unsigned int,Distributing>, but
+ // std::map<size_type,Distributing>, but
// here done for a
// std::vector<Distributing>, much faster
// for short lists as we have them here
inline
void
- GlobalRowsFromLocal::insert_index (const unsigned int global_row,
- const unsigned int local_row,
- const double constraint_value)
+ GlobalRowsFromLocal::insert_index (const size_type global_row,
+ const size_type local_row,
+ const double constraint_value)
{
typedef std::vector<Distributing>::iterator index_iterator;
index_iterator pos, pos1;
Distributing row_value (global_row);
- std::pair<unsigned int,double> constraint (local_row, constraint_value);
+ std::pair<size_type,double> constraint (local_row, constraint_value);
// check whether the list was really
// sorted before entering here
- for (unsigned int i=1; i<n_active_rows; ++i)
+ for (size_type i=1; i<n_active_rows; ++i)
Assert (total_row_indices[i-1] < total_row_indices[i], ExcInternalError());
pos = Utilities::lower_bound (total_row_indices.begin(),
++n_active_rows;
}
- if (pos1->constraint_position == numbers::invalid_unsigned_int)
+ if (pos1->constraint_position == numbers::invalid_size_type)
pos1->constraint_position = data_cache.insert_new_index (constraint);
else
data_cache.append_index (pos1->constraint_position, constraint);
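// Editorial sketch of the sorted-vector insertion pattern that
// insert_index() implements above: a binary search finds the slot, and an
// entry is inserted only if the row is not yet present. For the short
// per-cell lists handled here this beats std::map, as the comment above
// notes. (Standalone code with illustrative names.)
#include <algorithm>
#include <cstddef>
#include <vector>

void insert_sorted (std::vector<std::size_t> &rows,
                    const std::size_t         global_row)
{
  std::vector<std::size_t>::iterator pos =
    std::lower_bound (rows.begin (), rows.end (), global_row);
  if (pos == rows.end () || *pos != global_row)
    rows.insert (pos, global_row); // keeps the list sorted
  // else: the row exists already; insert_index() would instead append
  // the constraint entry to the row's data cache.
}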
void
GlobalRowsFromLocal::sort ()
{
- unsigned int i, j, j2, temp, templ, istep;
- unsigned int step;
+ size_type i, j, j2, temp, templ, istep;
+ size_type step;
// check whether the
// constraints are really empty.
- const unsigned int length = size();
+ const size_type length = size();
// make sure that we are in the
// range of the vector
AssertIndexRange (length, total_row_indices.size()+1);
- for (unsigned int i=0; i<length; ++i)
+ for (size_type i=0; i<length; ++i)
Assert (total_row_indices[i].constraint_position ==
- numbers::invalid_unsigned_int,
+ numbers::invalid_size_type,
ExcInternalError());
step = length/2;
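// Editorial sketch of the shell sort that GlobalRowsFromLocal::sort()
// performs on total_row_indices, shown here on a plain vector with the
// halving gap sequence implied by step = length/2 above. (Standalone,
// illustrative code.)
#include <cstddef>
#include <vector>

void shell_sort (std::vector<std::size_t> &v)
{
  const std::size_t length = v.size ();
  for (std::size_t step = length/2; step > 0; step /= 2)
    for (std::size_t i = step; i < length; ++i)
      {
        // gapped insertion: move v[i] left in steps of 'step'
        const std::size_t temp = v[i];
        std::size_t j = i;
        while (j >= step && v[j-step] > temp)
          {
            v[j] = v[j-step];
            j -= step;
          }
        v[j] = temp;
      }
}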
template <class BlockType>
inline
void
- make_block_starts (const BlockType &block_object,
- GlobalRowsFromLocal &global_rows,
- std::vector<unsigned int> &block_starts)
+ make_block_starts (const BlockType &block_object,
+ GlobalRowsFromLocal &global_rows,
+ std::vector<size_type> &block_starts)
{
AssertDimension (block_starts.size(), block_object.n_block_rows()+1);
typedef std::vector<Distributing>::iterator row_iterator;
row_iterator block_indices = global_rows.total_row_indices.begin();
- const unsigned int num_blocks = block_object.n_block_rows();
- const unsigned int n_active_rows = global_rows.size();
+ const size_type num_blocks = block_object.n_block_rows();
+ const size_type n_active_rows = global_rows.size();
// find end of rows.
block_starts[0] = 0;
- for (unsigned int i=1; i<num_blocks; ++i)
+ for (size_type i=1; i<num_blocks; ++i)
{
row_iterator first_block =
Utilities::lower_bound (block_indices,
// transform row indices to block-local
// index space
- for (unsigned int i=block_starts[1]; i<n_active_rows; ++i)
+ for (size_type i=block_starts[1]; i<n_active_rows; ++i)
global_rows.global_row(i) = block_object.get_row_indices().
global_to_local(global_rows.global_row(i)).second;
}
template <class BlockType>
inline
void
- make_block_starts (const BlockType &block_object,
- std::vector<unsigned int> &row_indices,
- std::vector<unsigned int> &block_starts)
+ make_block_starts (const BlockType &block_object,
+ std::vector<size_type> &row_indices,
+ std::vector<size_type> &block_starts)
{
AssertDimension (block_starts.size(), block_object.n_block_rows()+1);
- typedef std::vector<unsigned int>::iterator row_iterator;
+ typedef std::vector<size_type>::iterator row_iterator;
row_iterator col_indices = row_indices.begin();
- const unsigned int num_blocks = block_object.n_block_rows();
+ const size_type num_blocks = block_object.n_block_rows();
// find end of rows.
block_starts[0] = 0;
- for (unsigned int i=1; i<num_blocks; ++i)
+ for (size_type i=1; i<num_blocks; ++i)
{
row_iterator first_block =
Utilities::lower_bound (col_indices,
// transform row indices to local index
// space
- for (unsigned int i=block_starts[1]; i<row_indices.size(); ++i)
+ for (size_type i=block_starts[1]; i<row_indices.size(); ++i)
row_indices[i] = block_object.get_row_indices().
global_to_local(row_indices[i]).second;
}
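// Editorial sketch of the partitioning idea in make_block_starts(): given
// a sorted list of global rows and the first global index of each block,
// locate the start of each block's rows by binary search. The block_begin
// array is an assumption standing in for block_object.get_row_indices().
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<std::size_t>
find_block_starts (const std::vector<std::size_t> &sorted_rows,
                   const std::vector<std::size_t> &block_begin) // size n_blocks+1
{
  const std::size_t n_blocks = block_begin.size () - 1;
  std::vector<std::size_t> block_starts (n_blocks+1, sorted_rows.size ());
  block_starts[0] = 0;
  for (std::size_t b = 1; b < n_blocks; ++b)
    block_starts[b] = std::lower_bound (sorted_rows.begin (),
                                        sorted_rows.end (),
                                        block_begin[b])
                      - sorted_rows.begin ();
  return block_starts;
}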
static inline
double resolve_matrix_entry (const GlobalRowsFromLocal &global_rows,
const GlobalRowsFromLocal &global_cols,
- const unsigned int i,
- const unsigned int j,
- const unsigned int loc_row,
+ const size_type i,
+ const size_type j,
+ const size_type loc_row,
const FullMatrix<double> &local_matrix)
{
- const unsigned int loc_col = global_cols.local_row(j);
+ const size_type loc_col = global_cols.local_row(j);
double col_val;
// case 1: row has direct contribution in
// local matrix. decide whether col has a
// direct contribution. if not,
// set the value to zero.
- if (loc_row != numbers::invalid_unsigned_int)
+ if (loc_row != numbers::invalid_size_type)
{
- col_val = ((loc_col != numbers::invalid_unsigned_int) ?
+ col_val = ((loc_col != numbers::invalid_size_type) ?
local_matrix(loc_row, loc_col) : 0);
// account for indirect contributions by
// constraints in column
- for (unsigned int p=0; p<global_cols.size(j); ++p)
+ for (size_type p=0; p<global_cols.size(j); ++p)
col_val += (local_matrix(loc_row, global_cols.local_row(j,p)) *
global_cols.constraint_value(j,p));
}
// constraints in row, going through the
// direct and indirect references in the
// given column.
- for (unsigned int q=0; q<global_rows.size(i); ++q)
+ for (size_type q=0; q<global_rows.size(i); ++q)
{
- double add_this = (loc_col != numbers::invalid_unsigned_int)
+ double add_this = (loc_col != numbers::invalid_size_type)
? local_matrix(global_rows.local_row(i,q), loc_col) : 0;
- for (unsigned int p=0; p<global_cols.size(j); ++p)
+ for (size_type p=0; p<global_cols.size(j); ++p)
add_this += (local_matrix(global_rows.local_row(i,q),
global_cols.local_row(j,p))
*
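// In formula form (editorial note; symbols as defined by the code above):
// write M for local_matrix, r_i for the direct local row of global row i
// and (r_{i,q}, w_{i,q}) for its resolved constraint entries, and likewise
// c_j, (c_{j,p}, v_{j,p}) for global column j. Then resolve_matrix_entry
// computes
//
//   M^{res}_{ij} = M_{r_i, c_j}
//                + \sum_p v_{j,p} M_{r_i, c_{j,p}}
//                + \sum_q w_{i,q} ( M_{r_{i,q}, c_j}
//                                 + \sum_p v_{j,p} M_{r_{i,q}, c_{j,p}} ),
//
// where every term containing an invalid (i.e. absent) r or c is dropped.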
void
resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
const GlobalRowsFromLocal &global_cols,
- const unsigned int i,
- const unsigned int column_start,
- const unsigned int column_end,
- const FullMatrix<double> &local_matrix,
- unsigned int *&col_ptr,
- number *&val_ptr)
+ const size_type i,
+ const size_type column_start,
+ const size_type column_end,
+ const FullMatrix<double> &local_matrix,
+ size_type *&col_ptr,
+ number *&val_ptr)
{
if (column_end == column_start)
return;
AssertIndexRange (column_end-1, global_cols.size());
- const unsigned int loc_row = global_rows.local_row(i);
+ const size_type loc_row = global_rows.local_row(i);
// fast function if there are no indirect
// references to any of the local rows at
AssertIndexRange(loc_row, local_matrix.m());
const double *matrix_ptr = &local_matrix(loc_row, 0);
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
- const unsigned int loc_col = global_cols.local_row(j);
+ const size_type loc_col = global_cols.local_row(j);
AssertIndexRange(loc_col, local_matrix.n());
const double col_val = matrix_ptr[loc_col];
if (col_val != 0.)
// to do some more checks.
else
{
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
double col_val = resolve_matrix_entry (global_rows, global_cols, i, j,
loc_row, local_matrix);
template <typename SparseMatrixIterator>
static inline
void add_value (const double value,
- const unsigned int row,
- const unsigned int column,
+ const size_type row,
+ const size_type column,
SparseMatrixIterator &matrix_values)
{
if (value != 0.)
inline
void
resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
- const unsigned int i,
- const unsigned int column_start,
- const unsigned int column_end,
- const FullMatrix<double> &local_matrix,
- SparseMatrix<number> *sparse_matrix)
+ const size_type i,
+ const size_type column_start,
+ const size_type column_end,
+ const FullMatrix<double> &local_matrix,
+ SparseMatrix<number> *sparse_matrix)
{
if (column_end == column_start)
return;
if (sparsity.n_nonzero_elements() == 0)
return;
- const unsigned int row = global_rows.global_row(i);
- const unsigned int loc_row = global_rows.local_row(i);
+ const size_type row = global_rows.global_row(i);
+ const size_type loc_row = global_rows.local_row(i);
typename SparseMatrix<number>::iterator
matrix_values = sparse_matrix->begin(row);
AssertIndexRange (loc_row, local_matrix.m());
const double *matrix_ptr = &local_matrix(loc_row, 0);
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
- const unsigned int loc_col = global_rows.local_row(j);
+ const size_type loc_col = global_rows.local_row(j);
const double col_val = matrix_ptr[loc_col];
dealiiSparseMatrix::add_value (col_val, row,
global_rows.global_row(j),
}
else
{
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
loc_row, local_matrix);
const double *matrix_ptr = &local_matrix(loc_row, 0);
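// (Editorial note: deal.II's SparsityPattern stores the diagonal of a
// quadratic pattern as the first entry of each row, which is why
// begin(row) addresses the diagonal element in the next line.)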
sparse_matrix->begin(row)->value() += matrix_ptr[loc_row];
- for (unsigned int j=column_start; j<i; ++j)
+ for (size_type j=column_start; j<i; ++j)
{
- const unsigned int loc_col = global_rows.local_row(j);
+ const size_type loc_col = global_rows.local_row(j);
const double col_val = matrix_ptr[loc_col];
dealiiSparseMatrix::add_value(col_val, row,
global_rows.global_row(j),
matrix_values);
}
- for (unsigned int j=i+1; j<column_end; ++j)
+ for (size_type j=i+1; j<column_end; ++j)
{
- const unsigned int loc_col = global_rows.local_row(j);
+ const size_type loc_col = global_rows.local_row(j);
const double col_val = matrix_ptr[loc_col];
dealiiSparseMatrix::add_value(col_val, row,
global_rows.global_row(j),
sparse_matrix->begin(row)->value() +=
resolve_matrix_entry (global_rows, global_rows, i, i,
loc_row, local_matrix);
- for (unsigned int j=column_start; j<i; ++j)
+ for (size_type j=column_start; j<i; ++j)
{
double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
loc_row, local_matrix);
global_rows.global_row(j),
matrix_values);
}
- for (unsigned int j=i+1; j<column_end; ++j)
+ for (size_type j=i+1; j<column_end; ++j)
{
double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
loc_row, local_matrix);
AssertIndexRange (loc_row, local_matrix.m());
const double *matrix_ptr = &local_matrix(loc_row, 0);
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
- const unsigned int loc_col = global_rows.local_row(j);
+ const size_type loc_col = global_rows.local_row(j);
const double col_val = matrix_ptr[loc_col];
if (row==global_rows.global_row(j))
sparse_matrix->begin(row)->value() += col_val;
else
{
++matrix_values; // jump over diagonal element
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
double col_val = resolve_matrix_entry (global_rows, global_rows, i,
j, loc_row, local_matrix);
inline
void
resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
- const unsigned int i,
- const unsigned int column_start,
- const unsigned int column_end,
+ const size_type i,
+ const size_type column_start,
+ const size_type column_end,
const Table<2,bool> &dof_mask,
- std::vector<unsigned int>::iterator &col_ptr)
+ std::vector<size_type>::iterator &col_ptr)
{
if (column_end == column_start)
return;
- const unsigned int loc_row = global_rows.local_row(i);
+ const size_type loc_row = global_rows.local_row(i);
// fast function if there are no indirect
// references to any of the local rows at
Assert(loc_row < dof_mask.n_rows(),
ExcInternalError());
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
- const unsigned int loc_col = global_rows.local_row(j);
+ const size_type loc_col = global_rows.local_row(j);
Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
if (dof_mask(loc_row,loc_col) == true)
// to do some more checks.
else
{
- for (unsigned int j=column_start; j<column_end; ++j)
+ for (size_type j=column_start; j<column_end; ++j)
{
- const unsigned int loc_col = global_rows.local_row(j);
- if (loc_row != numbers::invalid_unsigned_int)
+ const size_type loc_col = global_rows.local_row(j);
+ if (loc_row != numbers::invalid_size_type)
{
Assert (loc_row < dof_mask.n_rows(), ExcInternalError());
- if (loc_col != numbers::invalid_unsigned_int)
+ if (loc_col != numbers::invalid_size_type)
{
Assert (loc_col < dof_mask.n_cols(), ExcInternalError());
if (dof_mask(loc_row,loc_col) == true)
goto add_this_index;
}
- for (unsigned int p=0; p<global_rows.size(j); ++p)
+ for (size_type p=0; p<global_rows.size(j); ++p)
if (dof_mask(loc_row,global_rows.local_row(j,p)) == true)
goto add_this_index;
}
- for (unsigned int q=0; q<global_rows.size(i); ++q)
+ for (size_type q=0; q<global_rows.size(i); ++q)
{
- if (loc_col != numbers::invalid_unsigned_int)
+ if (loc_col != numbers::invalid_size_type)
{
Assert (loc_col < dof_mask.n_cols(), ExcInternalError());
if (dof_mask(global_rows.local_row(i,q),loc_col) == true)
goto add_this_index;
}
- for (unsigned int p=0; p<global_rows.size(j); ++p)
+ for (size_type p=0; p<global_rows.size(j); ++p)
if (dof_mask(global_rows.local_row(i,q),
global_rows.local_row(j,p)) == true)
goto add_this_index;
template <typename MatrixType, typename VectorType>
inline void
set_matrix_diagonals (const internals::GlobalRowsFromLocal &global_rows,
- const std::vector<unsigned int> &local_dof_indices,
+ const std::vector<size_type> &local_dof_indices,
const FullMatrix<double> &local_matrix,
const ConstraintMatrix &constraints,
MatrixType &global_matrix,
if (global_rows.n_constraints() > 0)
{
double average_diagonal = 0;
- for (unsigned int i=0; i<local_matrix.m(); ++i)
+ for (size_type i=0; i<local_matrix.m(); ++i)
average_diagonal += std::fabs (local_matrix(i,i));
average_diagonal /= static_cast<double>(local_matrix.m());
- for (unsigned int i=0; i<global_rows.n_constraints(); i++)
+ for (size_type i=0; i<global_rows.n_constraints(); i++)
{
- const unsigned int local_row = global_rows.constraint_origin(i);
- const unsigned int global_row = local_dof_indices[local_row];
+ const size_type local_row = global_rows.constraint_origin(i);
+ const size_type global_row = local_dof_indices[local_row];
const typename MatrixType::value_type new_diagonal
= (std::fabs(local_matrix(local_row,local_row)) != 0 ?
std::fabs(local_matrix(local_row,local_row)) : average_diagonal);
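// Editorial note on the choice above: each constrained local dof l gets
// the diagonal value
//
//   d_new = |M_{ll}|                 if M_{ll} != 0,
//   d_new = (1/m) \sum_i |M_{ii}|    otherwise,
//
// which keeps the eliminated row regular and scaled like the rest of the
// matrix; the exact value does not influence the solution, since the
// constrained dofs are overwritten by ConstraintMatrix::distribute()
// afterwards.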
template <typename SparsityType>
inline void
set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows,
- const std::vector<unsigned int> &local_dof_indices,
+ const std::vector<size_type> &local_dof_indices,
const Table<2,bool> &dof_mask,
const bool keep_constrained_entries,
SparsityType &sparsity_pattern)
// that have been left out above
if (global_rows.n_constraints() > 0)
{
- for (unsigned int i=0; i<global_rows.n_constraints(); i++)
+ for (size_type i=0; i<global_rows.n_constraints(); i++)
{
- const unsigned int local_row = global_rows.constraint_origin(i);
- const unsigned int global_row = local_dof_indices[local_row];
+ const size_type local_row = global_rows.constraint_origin(i);
+ const size_type global_row = local_dof_indices[local_row];
if (keep_constrained_entries == true)
{
- for (unsigned int j=0; j<local_dof_indices.size(); ++j)
+ for (size_type j=0; j<local_dof_indices.size(); ++j)
{
if (dof_mask(local_row,j) == true)
sparsity_pattern.add(global_row,
// are related to it.
void
ConstraintMatrix::
-make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
+make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
internals::GlobalRowsFromLocal &global_rows) const
{
- const unsigned int n_local_dofs = local_dof_indices.size();
+ const size_type n_local_dofs = local_dof_indices.size();
AssertDimension (n_local_dofs, global_rows.size());
// when distributing the local data to
// cache whether we have to resolve any
// indirect rows generated from resolving
// constrained dofs.
- unsigned int added_rows = 0;
+ size_type added_rows = 0;
// first add the indices in an unsorted
// way and only keep track of the
// constraints that appear. They are
// resolved in a second step.
- for (unsigned int i = 0; i<n_local_dofs; ++i)
+ for (size_type i = 0; i<n_local_dofs; ++i)
{
if (is_constrained(local_dof_indices[i]) == false)
{
}
global_rows.sort();
- const unsigned int n_constrained_rows = n_local_dofs-added_rows;
- for (unsigned int i=0; i<n_constrained_rows; ++i)
+ const size_type n_constrained_rows = n_local_dofs-added_rows;
+ for (size_type i=0; i<n_constrained_rows; ++i)
{
- const unsigned int local_row = global_rows.constraint_origin(i);
+ const size_type local_row = global_rows.constraint_origin(i);
AssertIndexRange(local_row, n_local_dofs);
- const unsigned int global_row = local_dof_indices[local_row];
+ const size_type global_row = local_dof_indices[local_row];
Assert (is_constrained(global_row), ExcInternalError());
const ConstraintLine &position =
lines[lines_cache[calculate_line_index(global_row)]];
if (position.inhomogeneity != 0)
global_rows.set_ith_constraint_inhomogeneous (i);
- for (unsigned int q=0; q<position.entries.size(); ++q)
+ for (size_type q=0; q<position.entries.size(); ++q)
global_rows.insert_index (position.entries[q].first,
local_row,
position.entries[q].second);
inline
void
ConstraintMatrix::
-make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
- std::vector<unsigned int> &active_dofs) const
+make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
+ std::vector<size_type> &active_dofs) const
{
- const unsigned int n_local_dofs = local_dof_indices.size();
- unsigned int added_rows = 0;
- for (unsigned int i = 0; i<n_local_dofs; ++i)
+ const size_type n_local_dofs = local_dof_indices.size();
+ size_type added_rows = 0;
+ for (size_type i = 0; i<n_local_dofs; ++i)
{
if (is_constrained(local_dof_indices[i]) == false)
{
}
std::sort (active_dofs.begin(), active_dofs.begin()+added_rows);
- const unsigned int n_constrained_dofs = n_local_dofs-added_rows;
- for (unsigned int i=n_constrained_dofs; i>0; --i)
+ const size_type n_constrained_dofs = n_local_dofs-added_rows;
+ for (size_type i=n_constrained_dofs; i>0; --i)
{
- const unsigned int local_row = active_dofs.back();
+ const size_type local_row = active_dofs.back();
// remove constrained entry since we
// are going to resolve it in place
active_dofs.pop_back();
- const unsigned int global_row = local_dof_indices[local_row];
+ const size_type global_row = local_dof_indices[local_row];
const ConstraintLine &position =
lines[lines_cache[calculate_line_index(global_row)]];
- for (unsigned int q=0; q<position.entries.size(); ++q)
+ for (size_type q=0; q<position.entries.size(); ++q)
{
- const unsigned int new_index = position.entries[q].first;
+ const size_type new_index = position.entries[q].first;
if (active_dofs[active_dofs.size()-i] < new_index)
active_dofs.insert(active_dofs.end()-i+1,new_index);
// the list sorted
else
{
- std::vector<unsigned int>::iterator it =
+ std::vector<size_type>::iterator it =
Utilities::lower_bound(active_dofs.begin(),
active_dofs.end()-i+1,
new_index);
inline
double
ConstraintMatrix::
-resolve_vector_entry (const unsigned int i,
+resolve_vector_entry (const size_type i,
const internals::GlobalRowsFromLocal &global_rows,
const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
+ const std::vector<size_type> &local_dof_indices,
const FullMatrix<double> &local_matrix) const
{
- const unsigned int loc_row = global_rows.local_row(i);
- const unsigned int n_inhomogeneous_rows = global_rows.n_inhomogeneities();
+ const size_type loc_row = global_rows.local_row(i);
+ const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities();
double val = 0;
// has a direct contribution from some local
// entry. If we have inhomogeneous
// constraints, compute the contribution of
// the inhomogeneity in the current row.
- if (loc_row != numbers::invalid_unsigned_int)
+ if (loc_row != numbers::invalid_size_type)
{
val = local_vector(loc_row);
- for (unsigned int i=0; i<n_inhomogeneous_rows; ++i)
+ for (size_type i=0; i<n_inhomogeneous_rows; ++i)
val -= (lines[lines_cache[calculate_line_index(local_dof_indices
[global_rows.constraint_origin(i)])]].
inhomogeneity *
}
// go through the indirect contributions
- for (unsigned int q=0; q<global_rows.size(i); ++q)
+ for (size_type q=0; q<global_rows.size(i); ++q)
{
- const unsigned int loc_row_q = global_rows.local_row(i,q);
+ const size_type loc_row_q = global_rows.local_row(i,q);
double add_this = local_vector (loc_row_q);
- for (unsigned int k=0; k<n_inhomogeneous_rows; ++k)
+ for (size_type k=0; k<n_inhomogeneous_rows; ++k)
add_this -= (lines[lines_cache[calculate_line_index
(local_dof_indices
[global_rows.constraint_origin(k)])]].
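// In formula form (editorial note; the factor following "inhomogeneity *"
// is the corresponding local matrix entry, truncated in this excerpt):
// with f the local vector, g_k and o_k the inhomogeneity and local row of
// the k-th inhomogeneously constrained dof, and r_i, (r_{i,q}, w_{i,q})
// as in the matrix case, the resolved right hand side entry is
//
//   b^{res}_i  = ftilde(r_i) + \sum_q w_{i,q} ftilde(r_{i,q}),
//   ftilde(r)  = f_r - \sum_k g_k M_{r, o_k},
//
// i.e. the right hand side is corrected for inhomogeneous constraints
// before the usual distribution of constrained rows.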
ConstraintMatrix::distribute_local_to_global (
const FullMatrix<double> &local_matrix,
const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
+ const std::vector<size_type> &local_dof_indices,
MatrixType &global_matrix,
VectorType &global_vector,
bool use_inhomogeneities_for_rhs,
}
Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
- const unsigned int n_local_dofs = local_dof_indices.size();
+ const size_type n_local_dofs = local_dof_indices.size();
internals::GlobalRowsFromLocal global_rows (n_local_dofs);
make_sorted_row_list (local_dof_indices, global_rows);
- const unsigned int n_actual_dofs = global_rows.size();
+ const size_type n_actual_dofs = global_rows.size();
// create arrays for the column data
// (indices and values) that will then be
// written into the matrix. Shortcut for
// deal.II sparse matrix
- std::vector<unsigned int> cols;
- std::vector<number> vals;
+ std::vector<size_type> cols;
+ std::vector<number> vals;
SparseMatrix<number> *sparse_matrix
= dynamic_cast<SparseMatrix<number> *>(&global_matrix);
if (use_dealii_matrix == false)
// the global rows that we will touch and
// call resolve_matrix_row for each of
// those.
- for (unsigned int i=0; i<n_actual_dofs; ++i)
+ for (size_type i=0; i<n_actual_dofs; ++i)
{
- const unsigned int row = global_rows.global_row(i);
+ const size_type row = global_rows.global_row(i);
// calculate all the data that will be
// written into the matrix row.
if (use_dealii_matrix == false)
{
- unsigned int *col_ptr = &cols[0];
+ size_type *col_ptr = &cols[0];
number *val_ptr = &vals[0];
internals::resolve_matrix_row (global_rows, global_rows, i, 0,
n_actual_dofs,
local_matrix, col_ptr, val_ptr);
- const unsigned int n_values = col_ptr - &cols[0];
- Assert (n_values == (unsigned int)(val_ptr - &vals[0]),
+ const size_type n_values = col_ptr - &cols[0];
+ Assert (n_values == (size_type)(val_ptr - &vals[0]),
ExcInternalError());
if (n_values > 0)
global_matrix.add(row, n_values, &cols[0], &vals[0], false, true);
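// Editorial usage sketch for the function above, as it appears in a
// typical deal.II assembly loop (cell_matrix, cell_rhs, local_dof_indices
// and the global objects are assumed to be set up in the usual way):
//
//   cell->get_dof_indices (local_dof_indices);
//   constraints.distribute_local_to_global (cell_matrix, cell_rhs,
//                                           local_dof_indices,
//                                           system_matrix, system_rhs);
//
// This writes the local contributions and eliminates constraints in one
// pass, avoiding a separate condense() step on the assembled system.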
template <typename MatrixType>
void
ConstraintMatrix::distribute_local_to_global (
- const FullMatrix<double> &local_matrix,
- const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- MatrixType &global_matrix) const
+ const FullMatrix<double> &local_matrix,
+ const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
+ MatrixType &global_matrix) const
{
typedef double number;
AssertDimension (local_matrix.n(), col_indices.size());
//Assert (sorted == true, ExcMatrixNotClosed());
- const unsigned int n_local_row_dofs = row_indices.size();
- const unsigned int n_local_col_dofs = col_indices.size();
+ const size_type n_local_row_dofs = row_indices.size();
+ const size_type n_local_col_dofs = col_indices.size();
internals::GlobalRowsFromLocal global_rows (n_local_row_dofs);
internals::GlobalRowsFromLocal global_cols (n_local_col_dofs);
make_sorted_row_list (row_indices, global_rows);
make_sorted_row_list (col_indices, global_cols);
- const unsigned int n_actual_row_dofs = global_rows.size();
- const unsigned int n_actual_col_dofs = global_cols.size();
+ const size_type n_actual_row_dofs = global_rows.size();
+ const size_type n_actual_col_dofs = global_cols.size();
// create arrays for the column data
// (indices and values) that will then be
// written into the matrix. Shortcut for
// deal.II sparse matrix
- std::vector<unsigned int> cols (n_actual_col_dofs);
+ std::vector<size_type> cols (n_actual_col_dofs);
std::vector<number> vals (n_actual_col_dofs);
// now do the actual job.
- for (unsigned int i=0; i<n_actual_row_dofs; ++i)
+ for (size_type i=0; i<n_actual_row_dofs; ++i)
{
- const unsigned int row = global_rows.global_row(i);
+ const size_type row = global_rows.global_row(i);
// calculate all the data that will be
// written into the matrix row.
- unsigned int *col_ptr = &cols[0];
+ size_type *col_ptr = &cols[0];
number *val_ptr = &vals[0];
internals::resolve_matrix_row (global_rows, global_cols, i, 0,
n_actual_col_dofs,
local_matrix, col_ptr, val_ptr);
- const unsigned int n_values = col_ptr - &cols[0];
- Assert (n_values == (unsigned int)(val_ptr - &vals[0]),
+ const size_type n_values = col_ptr - &cols[0];
+ Assert (n_values == (size_type)(val_ptr - &vals[0]),
ExcInternalError());
if (n_values > 0)
global_matrix.add(row, n_values, &cols[0], &vals[0], false, true);
template <typename MatrixType, typename VectorType>
void
ConstraintMatrix::
-distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
+distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<size_type> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
internal::bool2type<true>) const
{
const bool use_vectors = (local_vector.size() == 0 &&
}
Assert (sorted == true, ExcMatrixNotClosed());
- const unsigned int n_local_dofs = local_dof_indices.size();
+ const size_type n_local_dofs = local_dof_indices.size();
internals::GlobalRowsFromLocal global_rows (n_local_dofs);
make_sorted_row_list (local_dof_indices, global_rows);
- const unsigned int n_actual_dofs = global_rows.size();
+ const size_type n_actual_dofs = global_rows.size();
- std::vector<unsigned int> global_indices;
+ std::vector<size_type> global_indices;
if (use_vectors == true)
{
global_indices.resize(n_actual_dofs);
- for (unsigned int i=0; i<n_actual_dofs; ++i)
+ for (size_type i=0; i<n_actual_dofs; ++i)
global_indices[i] = global_rows.global_row(i);
}
// additional construct that also takes
// care of block indices.
- const unsigned int num_blocks = global_matrix.n_block_rows();
- std::vector<unsigned int> block_starts(num_blocks+1, n_actual_dofs);
+ const size_type num_blocks = global_matrix.n_block_rows();
+ std::vector<size_type> block_starts(num_blocks+1, n_actual_dofs);
internals::make_block_starts (global_matrix, global_rows, block_starts);
- std::vector<unsigned int> cols;
+ std::vector<size_type> cols;
std::vector<number> vals;
if (use_dealii_matrix == false)
{
// through the blocks of the matrix
// separately, which allows us to set the
// block entries individually
- for (unsigned int block=0; block<num_blocks; ++block)
+ for (size_type block=0; block<num_blocks; ++block)
{
- const unsigned int next_block = block_starts[block+1];
- for (unsigned int i=block_starts[block]; i<next_block; ++i)
+ const size_type next_block = block_starts[block+1];
+ for (size_type i=block_starts[block]; i<next_block; ++i)
{
- const unsigned int row = global_rows.global_row(i);
+ const size_type row = global_rows.global_row(i);
- for (unsigned int block_col=0; block_col<num_blocks; ++block_col)
+ for (size_type block_col=0; block_col<num_blocks; ++block_col)
{
- const unsigned int start_block = block_starts[block_col],
+ const size_type start_block = block_starts[block_col],
end_block = block_starts[block_col+1];
if (use_dealii_matrix == false)
{
- unsigned int *col_ptr = &cols[0];
+ size_type *col_ptr = &cols[0];
number *val_ptr = &vals[0];
internals::resolve_matrix_row (global_rows, global_rows, i,
start_block, end_block,
local_matrix, col_ptr, val_ptr);
- const unsigned int n_values = col_ptr - &cols[0];
- Assert (n_values == (unsigned int)(val_ptr - &vals[0]),
+ const size_type n_values = col_ptr - &cols[0];
+ Assert (n_values == (size_type)(val_ptr - &vals[0]),
ExcInternalError());
if (n_values > 0)
global_matrix.block(block, block_col).add(row, n_values,
template <typename SparsityType>
void
ConstraintMatrix::
-add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
+add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
internal::bool2type<false> ) const
{
Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
- const unsigned int n_local_dofs = local_dof_indices.size();
+ const size_type n_local_dofs = local_dof_indices.size();
bool dof_mask_is_active = false;
if (dof_mask.n_rows() == n_local_dofs)
{
// indices that come from constraints.
if (dof_mask_is_active == false)
{
- std::vector<unsigned int> actual_dof_indices (n_local_dofs);
+ std::vector<size_type> actual_dof_indices (n_local_dofs);
make_sorted_row_list (local_dof_indices, actual_dof_indices);
- const unsigned int n_actual_dofs = actual_dof_indices.size();
+ const size_type n_actual_dofs = actual_dof_indices.size();
// now add the indices we collected above
// to the sparsity pattern. Very easy
// here - just add the same array to all
// the rows...
- for (unsigned int i=0; i<n_actual_dofs; ++i)
+ for (size_type i=0; i<n_actual_dofs; ++i)
sparsity_pattern.add_entries(actual_dof_indices[i],
actual_dof_indices.begin(),
actual_dof_indices.end(),
// the nice matrix structure we use
// elsewhere, so manually add those
// indices one by one.
- for (unsigned int i=0; i<n_local_dofs; i++)
+ for (size_type i=0; i<n_local_dofs; i++)
if (is_constrained(local_dof_indices[i]))
{
if (keep_constrained_entries == true)
- for (unsigned int j=0; j<n_local_dofs; j++)
+ for (size_type j=0; j<n_local_dofs; j++)
{
sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
// for additional comments.
internals::GlobalRowsFromLocal global_rows (n_local_dofs);
make_sorted_row_list (local_dof_indices, global_rows);
- const unsigned int n_actual_dofs = global_rows.size();
+ const size_type n_actual_dofs = global_rows.size();
// create arrays for the column indices
// that will then be written into the
// sparsity pattern.
- std::vector<unsigned int> cols (n_actual_dofs);
+ std::vector<size_type> cols (n_actual_dofs);
- for (unsigned int i=0; i<n_actual_dofs; ++i)
+ for (size_type i=0; i<n_actual_dofs; ++i)
{
- std::vector<unsigned int>::iterator col_ptr = cols.begin();
- const unsigned int row = global_rows.global_row(i);
+ std::vector<size_type>::iterator col_ptr = cols.begin();
+ const size_type row = global_rows.global_row(i);
internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
dof_mask, col_ptr);
template <typename SparsityType>
void
ConstraintMatrix::
-add_entries_local_to_global (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
+add_entries_local_to_global (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices,
SparsityType &sparsity_pattern,
const bool keep_constrained_entries,
const Table<2,bool> &dof_mask) const
{
- const unsigned int n_local_rows = row_indices.size();
- const unsigned int n_local_cols = col_indices.size();
+ const size_type n_local_rows = row_indices.size();
+ const size_type n_local_cols = col_indices.size();
bool dof_mask_is_active = false;
if (dof_mask.n_rows() == n_local_rows && dof_mask.n_cols() == n_local_cols)
dof_mask_is_active = true;
// indices that come from constraints.
if (dof_mask_is_active == false)
{
- std::vector<unsigned int> actual_row_indices (n_local_rows);
- std::vector<unsigned int> actual_col_indices (n_local_cols);
+ std::vector<size_type> actual_row_indices (n_local_rows);
+ std::vector<size_type> actual_col_indices (n_local_cols);
make_sorted_row_list (row_indices, actual_row_indices);
make_sorted_row_list (col_indices, actual_col_indices);
- const unsigned int n_actual_rows = actual_row_indices.size();
+ const size_type n_actual_rows = actual_row_indices.size();
// now add the indices we collected above
// to the sparsity pattern. Very easy
// here - just add the same array to all
// the rows...
- for (unsigned int i=0; i<n_actual_rows; ++i)
+ for (size_type i=0; i<n_actual_rows; ++i)
sparsity_pattern.add_entries(actual_row_indices[i],
actual_col_indices.begin(),
actual_col_indices.end(),
// of those to the sparsity pattern
if (keep_constrained_entries == true)
{
- for (unsigned int i=0; i<row_indices.size(); i++)
+ for (size_type i=0; i<row_indices.size(); i++)
if (is_constrained(row_indices[i]))
- for (unsigned int j=0; j<col_indices.size(); j++)
+ for (size_type j=0; j<col_indices.size(); j++)
sparsity_pattern.add (row_indices[i], col_indices[j]);
- for (unsigned int i=0; i<col_indices.size(); i++)
+ for (size_type i=0; i<col_indices.size(); i++)
if (is_constrained(col_indices[i]))
- for (unsigned int j=0; j<row_indices.size(); j++)
+ for (size_type j=0; j<row_indices.size(); j++)
sparsity_pattern.add (row_indices[j], col_indices[i]);
}
template <typename SparsityType>
void
ConstraintMatrix::
-add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
+add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
internal::bool2type<true> ) const
{
// just as the other
Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(),
ExcNotQuadratic());
- const unsigned int n_local_dofs = local_dof_indices.size();
- const unsigned int num_blocks = sparsity_pattern.n_block_rows();
+ const size_type n_local_dofs = local_dof_indices.size();
+ const size_type num_blocks = sparsity_pattern.n_block_rows();
bool dof_mask_is_active = false;
if (dof_mask.n_rows() == n_local_dofs)
if (dof_mask_is_active == false)
{
- std::vector<unsigned int> actual_dof_indices (n_local_dofs);
+ std::vector<size_type> actual_dof_indices (n_local_dofs);
make_sorted_row_list (local_dof_indices, actual_dof_indices);
- const unsigned int n_actual_dofs = actual_dof_indices.size();
+ const size_type n_actual_dofs = actual_dof_indices.size();
// additional construct that also takes
// care of block indices.
- std::vector<unsigned int> block_starts(num_blocks+1, n_actual_dofs);
+ std::vector<size_type> block_starts(num_blocks+1, n_actual_dofs);
internals::make_block_starts (sparsity_pattern, actual_dof_indices,
block_starts);
- for (unsigned int block=0; block<num_blocks; ++block)
+ for (size_type block=0; block<num_blocks; ++block)
{
- const unsigned int next_block = block_starts[block+1];
- for (unsigned int i=block_starts[block]; i<next_block; ++i)
+ const size_type next_block = block_starts[block+1];
+ for (size_type i=block_starts[block]; i<next_block; ++i)
{
Assert (i<n_actual_dofs, ExcInternalError());
- const unsigned int row = actual_dof_indices[i];
+ const size_type row = actual_dof_indices[i];
Assert (row < sparsity_pattern.block(block,0).n_rows(),
ExcInternalError());
- std::vector<unsigned int>::iterator index_it = actual_dof_indices.begin();
- for (unsigned int block_col = 0; block_col<num_blocks; ++block_col)
+ std::vector<size_type>::iterator index_it = actual_dof_indices.begin();
+ for (size_type block_col = 0; block_col<num_blocks; ++block_col)
{
- const unsigned int next_block_col = block_starts[block_col+1];
+ const size_type next_block_col = block_starts[block_col+1];
sparsity_pattern.block(block,block_col).
add_entries(row,
index_it,
}
}
- for (unsigned int i=0; i<n_local_dofs; i++)
+ for (size_type i=0; i<n_local_dofs; i++)
if (is_constrained(local_dof_indices[i]))
{
if (keep_constrained_entries == true)
- for (unsigned int j=0; j<n_local_dofs; j++)
+ for (size_type j=0; j<n_local_dofs; j++)
{
sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
// function for block matrices
internals::GlobalRowsFromLocal global_rows (n_local_dofs);
make_sorted_row_list (local_dof_indices, global_rows);
- const unsigned int n_actual_dofs = global_rows.size();
+ const size_type n_actual_dofs = global_rows.size();
// additional construct that also takes
// care of block indices.
- std::vector<unsigned int> block_starts(num_blocks+1, n_actual_dofs);
+ std::vector<size_type> block_starts(num_blocks+1, n_actual_dofs);
internals::make_block_starts(sparsity_pattern, global_rows,
block_starts);
- std::vector<unsigned int> cols (n_actual_dofs);
+ std::vector<size_type> cols (n_actual_dofs);
// the basic difference to the
// non-block variant from now onwards
// is that we go through the blocks
// of the matrix separately.
- for (unsigned int block=0; block<num_blocks; ++block)
+ for (size_type block=0; block<num_blocks; ++block)
{
- const unsigned int next_block = block_starts[block+1];
- for (unsigned int i=block_starts[block]; i<next_block; ++i)
+ const size_type next_block = block_starts[block+1];
+ for (size_type i=block_starts[block]; i<next_block; ++i)
{
- const unsigned int row = global_rows.global_row(i);
- for (unsigned int block_col=0; block_col<num_blocks; ++block_col)
+ const size_type row = global_rows.global_row(i);
+ for (size_type block_col=0; block_col<num_blocks; ++block_col)
{
- const unsigned int begin_block = block_starts[block_col],
+ const size_type begin_block = block_starts[block_col],
end_block = block_starts[block_col+1];
- std::vector<unsigned int>::iterator col_ptr = cols.begin();
+ std::vector<size_type>::iterator col_ptr = cols.begin();
internals::resolve_matrix_row (global_rows, i, begin_block,
end_block, dof_mask, col_ptr);
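// Editorial usage sketch for the sparsity analogue of the assembly call
// above (deal.II usage; with keep_constrained_entries == true the
// eliminated rows and columns stay in the pattern):
//
//   cell->get_dof_indices (local_dof_indices);
//   constraints.add_entries_local_to_global (local_dof_indices,
//                                            sparsity_pattern,
//                                            /*keep_constrained_entries=*/ true);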
class BlockVector : public BlockVectorBase<Vector<Number> >
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Typedef the base class for simpler
* access to its own typedefs.
* use blocks of different
* sizes.
*/
- explicit BlockVector (const unsigned int num_blocks = 0,
- const unsigned int block_size = 0);
+ explicit BlockVector (const size_type num_blocks = 0,
+ const size_type block_size = 0);
/**
* Copy-Constructor. Dimension set to
* <tt>block_sizes[i]</tt> zero
* elements.
*/
- BlockVector (const std::vector<unsigned int> &block_sizes);
+ BlockVector (const std::vector<size_type> &block_sizes);
/**
* Destructor. Clears memory
* If <tt>fast==false</tt>, the vector
* is filled with zeros.
*/
- void reinit (const unsigned int num_blocks,
- const unsigned int block_size = 0,
+ void reinit (const size_type num_blocks,
+ const size_type block_size = 0,
const bool fast = false);
/**
* since they may be routed to
* the wrong block.
*/
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
+ void reinit (const std::vector<size_type> &N,
+ const bool fast=false);
/**
* Change the dimension to that
template <typename Number>
inline
- BlockVector<Number>::BlockVector (const unsigned int n_blocks,
- const unsigned int block_size)
+ BlockVector<Number>::BlockVector (const size_type n_blocks,
+ const size_type block_size)
{
reinit (n_blocks, block_size);
}
template <typename Number>
inline
- BlockVector<Number>::BlockVector (const std::vector<unsigned int> &n)
+ BlockVector<Number>::BlockVector (const std::vector<size_type> &n)
{
reinit (n, false);
}
this->components.resize (v.n_blocks());
this->block_indices = v.block_indices;
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i] = v.components[i];
}
template <typename Number>
inline
- void BlockVector<Number>::reinit (const unsigned int n_bl,
- const unsigned int bl_sz,
+ void BlockVector<Number>::reinit (const size_type n_bl,
+ const size_type bl_sz,
const bool fast)
{
- std::vector<unsigned int> n(n_bl, bl_sz);
+ std::vector<size_type> n(n_bl, bl_sz);
reinit(n, fast);
}
template <typename Number>
inline
- void BlockVector<Number>::reinit (const std::vector<unsigned int> &n,
- const bool fast)
+ void BlockVector<Number>::reinit (const std::vector<size_type> &n,
+ const bool fast)
{
this->block_indices.reinit (n);
if (this->components.size() != this->n_blocks())
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].reinit(n[i], fast);
}
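// Editorial usage sketch of the two reinit() variants defined above
// (assuming a dealii::BlockVector<double> after this size_type change):
//
//   BlockVector<double> v (3, 10);   // three blocks of ten entries each
//
//   std::vector<BlockVector<double>::size_type> sizes (2);
//   sizes[0] = 5;
//   sizes[1] = 7;
//   v.reinit (sizes);                // now two blocks, of sizes 5 and 7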
if (this->components.size() != this->n_blocks())
this->components.resize(this->n_blocks());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->block(i).reinit(v.block(i), fast);
}
Assert (this->n_blocks() == v.n_blocks(),
ExcDimensionMismatch(this->n_blocks(), v.n_blocks()));
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
dealii::swap (this->components[i], v.components[i]);
dealii::swap (this->block_indices, v.block_indices);
}
Assert (numbers::is_finite(factor), ExcNumberNotFinite());
- for (unsigned int i=0; i<this->n_blocks(); ++i)
+ for (size_type i=0; i<this->n_blocks(); ++i)
this->components[i].scale(factor);
}
class Vector : public Subscriptor
{
public:
+ /**
+ * Declare the type for container size.
+ */
+ typedef std::size_t size_type;
+
/**
* Declare standard types used in all
* containers. These types parallel those in
* global size without any actual parallel
* distribution.
*/
- Vector (const unsigned int size);
+ Vector (const size_type size);
/**
* Constructs a parallel vector. The local
* size without any actual parallel
* distribution.
*/
- void reinit (const unsigned int size,
- const bool fast = false);
+ void reinit (const size_type size,
+ const bool fast = false);
/**
* Uses the parallel layout of the input
* equal to the sum of the number of locally
* owned indices among all the processors.
*/
- types::global_dof_index size () const;
+ size_type size () const;
/**
* Returns the local size of the vector, i.e.,
* the number of indices owned locally.
*/
- unsigned int local_size() const;
+ size_type local_size() const;
/**
* Returns the half-open interval that
* local_range().second -
* local_range().first</code>.
*/
- std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
+ std::pair<size_type, size_type> local_range () const;
/**
* Returns true if the given global index is
* in the local range of this processor.
*/
- bool in_local_range (const types::global_dof_index global_index) const;
+ bool in_local_range (const size_type global_index) const;
/**
* Returns the number of ghost elements
* present on the vector.
*/
- unsigned int n_ghost_entries () const;
+ size_type n_ghost_entries () const;
/**
* Returns whether the given global index is a
* are owned locally and for indices not
* present at all.
*/
- bool is_ghost_entry (const types::global_dof_index global_index) const;
+ bool is_ghost_entry (const size_type global_index) const;
/**
* Make the @p Vector class a bit like
* vector or be specified as a ghost
* index at construction.
*/
- Number operator () (const types::global_dof_index global_index) const;
+ Number operator () (const size_type global_index) const;
/**
* Read and write access to the data
* vector or be specified as a ghost
* index at construction.
*/
- Number &operator () (const types::global_dof_index global_index);
+ Number &operator () (const size_type global_index);
/**
* Read access to the data in the
* This function does the same thing
* as operator().
*/
- Number operator [] (const types::global_dof_index global_index) const;
+ Number operator [] (const size_type global_index) const;
/**
* Read and write access to the data
* This function does the same thing
* as operator().
*/
- Number &operator [] (const types::global_dof_index global_index);
+ Number &operator [] (const size_type global_index);
/**
* Read access to the data field specified by
* <code>[local_size,local_size+
* n_ghost_entries]</code>.
*/
- Number local_element (const unsigned int local_index) const;
+ Number local_element (const size_type local_index) const;
/**
* Read and write access to the data field
* indices with indices
* <code>[local_size,local_size+n_ghosts]</code>.
*/
- Number &local_element (const unsigned int local_index);
+ Number &local_element (const size_type local_index);
//@}
* indices.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const std::vector<OtherNumber> &values);
/**
* values.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const ::dealii::Vector<OtherNumber> &values);
/**
* functions above.
*/
template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
const OtherNumber *values);
/**
* The size that is currently allocated in the
* val array.
*/
- unsigned int allocated_size;
+ size_type allocated_size;
/**
* Pointer to the array of
* A helper function that is used to resize
* the val array.
*/
- void resize_val (const unsigned int new_allocated_size);
+ void resize_val (const size_type new_allocated_size);
/*
* Make all other vector types
template <typename Number>
inline
- Vector<Number>::Vector (const unsigned int size)
+ Vector<Number>::Vector (const size_type size)
:
allocated_size (0),
val (0),
reinit (c, true);
else if (partitioner.get() != c.partitioner.get())
{
- unsigned int local_ranges_different_loc = (local_range() !=
+ size_type local_ranges_different_loc = (local_range() !=
c.local_range());
if ((partitioner->n_mpi_processes() > 1 &&
Utilities::MPI::max(local_ranges_different_loc,
reinit (c, true);
else if (partitioner.get() != c.partitioner.get())
{
- unsigned int local_ranges_different_loc = (local_range() !=
+ size_type local_ranges_different_loc = (local_range() !=
c.local_range());
if ((partitioner->n_mpi_processes() > 1 &&
Utilities::MPI::max(local_ranges_different_loc,
template <typename Number>
inline
- types::global_dof_index Vector<Number>::size () const
+ typename Vector<Number>::size_type Vector<Number>::size () const
{
return partitioner->size();
}
template <typename Number>
inline
- unsigned int Vector<Number>::local_size () const
+ typename Vector<Number>::size_type Vector<Number>::local_size () const
{
return partitioner->local_size();
}
template <typename Number>
inline
- std::pair<types::global_dof_index, types::global_dof_index>
+ std::pair<typename Vector<Number>::size_type,
+           typename Vector<Number>::size_type>
Vector<Number>::local_range () const
{
return partitioner->local_range();
inline
bool
Vector<Number>::in_local_range
- (const types::global_dof_index global_index) const
+ (const size_type global_index) const
{
return partitioner->in_local_range (global_index);
}
template <typename Number>
inline
- unsigned int
+ typename Vector<Number>::size_type
Vector<Number>::n_ghost_entries () const
{
return partitioner->n_ghost_indices();
template <typename Number>
inline
bool
- Vector<Number>::is_ghost_entry (const types::global_dof_index global_index) const
+ Vector<Number>::is_ghost_entry (const size_type global_index) const
{
return partitioner->is_ghost_entry (global_index);
}
template <typename Number>
inline
Number
- Vector<Number>::operator() (const types::global_dof_index global_index) const
+ Vector<Number>::operator() (const size_type global_index) const
{
return val[partitioner->global_to_local(global_index)];
}
template <typename Number>
inline
Number &
- Vector<Number>::operator() (const types::global_dof_index global_index)
+ Vector<Number>::operator() (const size_type global_index)
{
return val[partitioner->global_to_local (global_index)];
}
template <typename Number>
inline
Number
- Vector<Number>::operator[] (const types::global_dof_index global_index) const
+ Vector<Number>::operator[] (const size_type global_index) const
{
return operator()(global_index);
}
template <typename Number>
inline
Number &
- Vector<Number>::operator[] (const types::global_dof_index global_index)
+ Vector<Number>::operator[] (const size_type global_index)
{
return operator()(global_index);
}
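// Editorial sketch contrasting the two index spaces used above:
// operator() and operator[] take global indices and translate them via
// the partitioner, while local_element() below addresses the local
// storage directly (v is assumed to be a parallel distributed
// Vector<double>):
//
//   const std::pair<Vector<double>::size_type,
//                   Vector<double>::size_type> range = v.local_range ();
//   for (Vector<double>::size_type i = range.first; i < range.second; ++i)
//     v(i) = 1.;                         // global indexing
//   for (Vector<double>::size_type i = 0; i < v.local_size (); ++i)
//     v.local_element (i) += 1.;         // same entries, local indexing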
template <typename Number>
inline
Number
- Vector<Number>::local_element (const unsigned int local_index) const
+ Vector<Number>::local_element (const size_type local_index) const
{
AssertIndexRange (local_index,
partitioner->local_size()+
template <typename Number>
inline
Number &
- Vector<Number>::local_element (const unsigned int local_index)
+ Vector<Number>::local_element (const size_type local_index)
{
AssertIndexRange (local_index,
partitioner->local_size()+
template <typename OtherNumber>
inline
void
- Vector<Number>::add (const std::vector<unsigned int> &indices,
+ Vector<Number>::add (const std::vector<size_type> &indices,
const std::vector<OtherNumber> &values)
{
AssertDimension (indices.size(), values.size());
template <typename OtherNumber>
inline
void
- Vector<Number>::add (const std::vector<unsigned int> &indices,
+ Vector<Number>::add (const std::vector<size_type> &indices,
const ::dealii::Vector<OtherNumber> &values)
{
AssertDimension (indices.size(), values.size());
template <typename OtherNumber>
inline
void
- Vector<Number>::add (const unsigned int n_indices,
- const unsigned int *indices,
+ Vector<Number>::add (const size_type n_indices,
+ const size_type *indices,
const OtherNumber *values)
{
- for (unsigned int i=0; i<n_indices; ++i)
+ for (size_type i=0; i<n_indices; ++i)
{
Assert (numbers::is_finite(values[i]),
ExcMessage("The given value is not finite but either infinite or Not A Number (NaN)"));
Vector<Number>::clear_mpi_requests ()
{
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- for (unsigned int j=0; j<compress_requests.size(); j++)
+ for (size_type j=0; j<compress_requests.size(); j++)
MPI_Request_free(&compress_requests[j]);
compress_requests.clear();
- for (unsigned int j=0; j<update_ghost_values_requests.size(); j++)
+ for (size_type j=0; j<update_ghost_values_requests.size(); j++)
MPI_Request_free(&update_ghost_values_requests[j]);
update_ghost_values_requests.clear();
#endif
template <typename Number>
void
- Vector<Number>::resize_val (const unsigned int new_alloc_size)
+ Vector<Number>::resize_val (const size_type new_alloc_size)
{
if (new_alloc_size > allocated_size)
{
template <typename Number>
void
- Vector<Number>::reinit (const unsigned int size,
- const bool fast)
+ Vector<Number>::reinit (const size_type size,
+ const bool fast)
{
clear_mpi_requests();
// check whether we need to reallocate
if (partitioner.get() != v.partitioner.get())
{
partitioner = v.partitioner;
- const unsigned int new_allocated_size = partitioner->local_size() +
- partitioner->n_ghost_indices();
+ const size_type new_allocated_size = partitioner->local_size() +
+ partitioner->n_ghost_indices();
resize_val (new_allocated_size);
vector_view.reinit (partitioner->local_size(), val);
}
partitioner = partitioner_in;
// set vector size and allocate memory
- const unsigned int new_allocated_size = partitioner->local_size() +
- partitioner->n_ghost_indices();
+ const size_type new_allocated_size = partitioner->local_size() +
+ partitioner->n_ghost_indices();
resize_val (new_allocated_size);
vector_view.reinit (partitioner->local_size(), val);
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const unsigned int n_import_targets = part.import_targets().size();
- const unsigned int n_ghost_targets = part.ghost_targets().size();
+ const size_type n_import_targets = part.import_targets().size();
+ const size_type n_ghost_targets = part.ghost_targets().size();
// Need to send and receive the data. Use
// non-blocking communication, where it is
// up yet
if (import_data == 0)
import_data = new Number[part.n_import_indices()];
- for (unsigned int i=0; i<n_import_targets; i++)
+ for (size_type i=0; i<n_import_targets; i++)
{
MPI_Recv_init (&import_data[current_index_start],
part.import_targets()[i].second*sizeof(Number),
Assert (part.local_size() == vector_view.size(), ExcInternalError());
current_index_start = part.local_size();
- for (unsigned int i=0; i<n_ghost_targets; i++)
+ for (size_type i=0; i<n_ghost_targets; i++)
{
MPI_Send_init (&this->val[current_index_start],
part.ghost_targets()[i].second*sizeof(Number),
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const unsigned int n_import_targets = part.import_targets().size();
- const unsigned int n_ghost_targets = part.ghost_targets().size();
+ const size_type n_import_targets = part.import_targets().size();
+ const size_type n_ghost_targets = part.ghost_targets().size();
AssertDimension (n_ghost_targets+n_import_targets,
compress_requests.size());
Assert (ierr == MPI_SUCCESS, ExcInternalError());
Number *read_position = import_data;
- std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ std::vector<std::pair<size_type, size_type> >::const_iterator
my_imports = part.import_indices().begin();
// If add_ghost_data is set, add the imported
// vector entries.
if (add_ghost_data == true)
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+ for (size_type j=my_imports->first; j<my_imports->second; j++)
local_element(j) += *read_position++;
else
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+ for (size_type j=my_imports->first; j<my_imports->second; j++)
local_element(j) = *read_position++;
AssertDimension(read_position-import_data,part.n_import_indices());
}
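The two loops above rely on part.import_indices() storing destination positions as half-open ranges [first, second). A self-contained sketch of that convention; apply_imports and its arguments are illustrative, not deal.II API:

    #include <cstddef>
    #include <utility>
    #include <vector>

    typedef std::size_t size_type;

    void apply_imports (std::vector<double> &local,
                        const std::vector<std::pair<size_type,size_type> > &ranges,
                        const double *received,
                        const bool add)
    {
      const double *read_position = received;
      for (std::size_t r=0; r<ranges.size(); ++r)
        for (size_type j=ranges[r].first; j<ranges[r].second; ++j)
          if (add)
            local[j] += *read_position++;   // compress(add): accumulate
          else
            local[j]  = *read_position++;   // compress(insert): overwrite
    }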
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const unsigned int n_import_targets = part.import_targets().size();
- const unsigned int n_ghost_targets = part.ghost_targets().size();
+ const size_type n_import_targets = part.import_targets().size();
+ const size_type n_ghost_targets = part.ghost_targets().size();
// Need to send and receive the data. Use
// non-blocking communication, where it is
{
Assert (part.local_size() == vector_view.size(),
ExcInternalError());
- unsigned int current_index_start = part.local_size();
+ size_type current_index_start = part.local_size();
update_ghost_values_requests.resize (n_import_targets+n_ghost_targets);
- for (unsigned int i=0; i<n_ghost_targets; i++)
+ for (size_type i=0; i<n_ghost_targets; i++)
{
// allow writing into ghost indices even
// though we are in a const function
if (import_data == 0 && part.n_import_indices() > 0)
import_data = new Number[part.n_import_indices()];
current_index_start = 0;
- for (unsigned int i=0; i<n_import_targets; i++)
+ for (size_type i=0; i<n_import_targets; i++)
{
MPI_Send_init (&import_data[current_index_start],
part.import_targets()[i].second*sizeof(Number),
{
Assert (import_data != 0, ExcInternalError());
Number *write_position = import_data;
- std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ std::vector<std::pair<size_type, size_type> >::const_iterator
my_imports = part.import_indices().begin();
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+ for (size_type j=my_imports->first; j<my_imports->second; j++)
*write_position++ = local_element(j);
}
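An illustrative sketch (not the deal.II implementation) of the persistent-request pattern behind the MPI_Send_init/MPI_Recv_init and MPI_Request_free calls in these hunks: set a transfer up once, restart it cheaply many times, and release it explicitly at the end:

    #include <mpi.h>

    void repeated_exchange (double *send_buf, int send_n, int dest,
                            double *recv_buf, int recv_n, int source)
    {
      MPI_Request requests[2];
      MPI_Send_init (send_buf, send_n, MPI_DOUBLE, dest,   0,
                     MPI_COMM_WORLD, &requests[0]);
      MPI_Recv_init (recv_buf, recv_n, MPI_DOUBLE, source, 0,
                     MPI_COMM_WORLD, &requests[1]);

      for (int sweep=0; sweep<10; ++sweep)
        {
          MPI_Startall (2, requests);                    // restart both transfers
          MPI_Waitall  (2, requests, MPI_STATUSES_IGNORE);
        }

      MPI_Request_free (&requests[0]);                   // persistent requests must
      MPI_Request_free (&requests[1]);                   // be freed explicitly
    }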
<< partitioner->size() << std::endl
<< "Vector data:" << std::endl;
if (across)
- for (unsigned int i=0; i<partitioner->local_size(); ++i)
+ for (size_type i=0; i<partitioner->local_size(); ++i)
out << local_element(i) << ' ';
else
- for (unsigned int i=0; i<partitioner->local_size(); ++i)
+ for (size_type i=0; i<partitioner->local_size(); ++i)
out << local_element(i) << std::endl;
out << std::endl;
out << "Ghost entries (global index / value):" << std::endl;
if (across)
- for (unsigned int i=0; i<partitioner->n_ghost_indices(); ++i)
+ for (size_type i=0; i<partitioner->n_ghost_indices(); ++i)
out << '(' << partitioner->ghost_indices().nth_index_in_set(i)
<< '/' << local_element(partitioner->local_size()+i) << ") ";
else
- for (unsigned int i=0; i<partitioner->n_ghost_indices(); ++i)
+ for (size_type i=0; i<partitioner->n_ghost_indices(); ++i)
out << '(' << partitioner->ghost_indices().nth_index_in_set(i)
<< '/' << local_element(partitioner->local_size()+i) << ")"
<< std::endl;
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
MPI_Barrier (partitioner->get_communicator());
- for (unsigned int i=partitioner->this_mpi_process()+1;
+ for (size_type i=partitioner->this_mpi_process()+1;
i<partitioner->n_mpi_processes(); i++)
MPI_Barrier (partitioner->get_communicator());
#endif
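A reduced, illustrative version of the barrier trick in the hunk above: each rank sits out one barrier per lower-numbered rank before writing, then joins one barrier per higher-numbered rank afterwards, which serializes the output in rank order:

    #include <mpi.h>
    #include <iostream>

    void print_in_rank_order (MPI_Comm comm)
    {
      int rank, size;
      MPI_Comm_rank (comm, &rank);
      MPI_Comm_size (comm, &size);

      for (int i=0; i<rank; ++i)         // wait until it is our turn
        MPI_Barrier (comm);

      std::cout << "rank " << rank << " reporting" << std::endl;

      for (int i=rank; i<size-1; ++i)    // release the ranks after us
        MPI_Barrier (comm);
    }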
*/
typedef std::size_t size_type;
-#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+#ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
/**
* Declare the type of integer.
*/
*/
typedef std::size_t size_type;
-#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+#ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
/**
* Declare the type of integer.
*/
*/
typedef std::size_t size_type;
-#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+#ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
/**
* Declare the type of integer.
*/
*/
typedef std::size_t size_type;
-#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+#ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
/**
* Declare the type of integer.
*/
*/
typedef std::size_t size_type;
-#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+#ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
/**
* Declare the type of integer.
*/
*/
typedef std::size_t size_type;
-#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
+#ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
/**
* Declare the type of integer.
*/
* Since this is not a distributed
* vector, the method always returns true.
*/
- bool in_local_range (const types::global_dof_index global_index) const;
+ bool in_local_range (const size_type global_index) const;
/**
* Return dimension of the vector.
template <typename Number>
inline
bool Vector<Number>::in_local_range
-(const types::global_dof_index) const
+(const size_type) const
{
return true;
}
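For readers unfamiliar with the macro renamed in the hunks above: assuming DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES marks an Epetra build without 64-bit global indices, the wrappers pick their integer type conditionally while size_type stays std::size_t. A stripped-down sketch of the pattern (struct name illustrative):

    #include <cstddef>

    struct EpetraIndexTraits
    {
      typedef std::size_t size_type;

    #ifdef DEAL_II_EPETRA_NO_64BIT_GLOBAL_INDICES
      typedef int int_type;          // Trilinos built with 32-bit global indices
    #else
      typedef long long int_type;    // Trilinos built with 64-bit global indices
    #endif
    };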
template <typename number>
BlockMatrixArray<number>::BlockMatrixArray (
- const unsigned int n_block_rows,
- const unsigned int n_block_cols)
+ const size_type n_block_rows,
+ const size_type n_block_cols)
: block_rows (n_block_rows),
block_cols (n_block_cols)
{}
template <typename number>
BlockMatrixArray<number>::BlockMatrixArray (
- const unsigned int n_block_rows,
- const unsigned int n_block_cols,
+ const size_type n_block_rows,
+ const size_type n_block_cols,
VectorMemory<Vector<number> > &)
: block_rows (n_block_rows),
block_cols (n_block_cols)
template <typename number>
void
BlockMatrixArray<number>::initialize (
- const unsigned int n_block_rows,
- const unsigned int n_block_cols,
+ const size_type n_block_rows,
+ const size_type n_block_cols,
VectorMemory<Vector<number> > &)
{
block_rows = n_block_rows;
template <typename number>
void
BlockMatrixArray<number>::initialize (
- const unsigned int n_block_rows,
- const unsigned int n_block_cols)
+ const size_type n_block_rows,
+ const size_type n_block_cols)
{
block_rows = n_block_rows;
block_cols = n_block_cols;
template <typename number>
void
BlockMatrixArray<number>::reinit (
- const unsigned int n_block_rows,
- const unsigned int n_block_cols)
+ const size_type n_block_rows,
+ const size_type n_block_cols)
{
clear();
block_rows = n_block_rows;
number result = 0.;
- for (unsigned int i=0; i<block_rows; ++i)
+ for (size_type i=0; i<block_rows; ++i)
{
aux.reinit(u.block(i));
for (m = entries.begin(); m != end ; ++m)
template <typename number>
-unsigned int
+typename BlockMatrixArray<number>::size_type
BlockMatrixArray<number>::n_block_rows () const
{
return block_rows;
template <typename number>
-unsigned int
+typename BlockMatrixArray<number>::size_type
BlockMatrixArray<number>::n_block_cols () const
{
return block_cols;
template <typename number>
BlockTrianglePrecondition<number>::BlockTrianglePrecondition(
- const unsigned int block_rows,
+ const size_type block_rows,
VectorMemory<Vector<number> > &,
const bool backward)
:
template <typename number>
BlockTrianglePrecondition<number>::BlockTrianglePrecondition(
- const unsigned int block_rows)
+ const size_type block_rows)
:
BlockMatrixArray<number> (block_rows, block_rows),
backward(false)
template <typename number>
void
BlockTrianglePrecondition<number>::initialize(
- const unsigned int n_block_rows,
+ const size_type n_block_rows,
VectorMemory<Vector<number> > &,
const bool backward)
{
template <typename number>
void
BlockTrianglePrecondition<number>::reinit (
- const unsigned int n)
+ const size_type n)
{
BlockMatrixArray<number>::reinit(n,n);
}
void
BlockTrianglePrecondition<number>::do_row (
BlockVector<number> &dst,
- unsigned int row_num) const
+ size_type row_num) const
{
GrowingVectorMemory<Vector<number> > mem;
typename std::vector<typename BlockMatrixArray<number>::Entry>::const_iterator
// they are not ordered by rows.
for (; m != end ; ++m)
{
- const unsigned int i=m->row;
+ const size_type i=m->row;
// Ignore everything not in
// this row
if (i != row_num)
continue;
- const unsigned int j=m->col;
+ const size_type j=m->col;
// Only use the lower (upper)
// triangle for forward
// (backward) substitution
else
{
aux = 0.;
- for (unsigned int i=0; i<diagonals.size(); ++i)
+ for (size_type i=0; i<diagonals.size(); ++i)
{
m = diagonals[i];
// First, divide by the current
if (backward)
{
- for (unsigned int i=n_block_rows(); i>0;)
+ for (size_type i=n_block_rows(); i>0;)
do_row(dst, --i);
}
else
{
- for (unsigned int i=0; i<n_block_rows(); ++i)
+ for (size_type i=0; i<n_block_rows(); ++i)
do_row(dst, i);
}
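The backward branch above uses the standard idiom for iterating an unsigned index downwards: testing i >= 0 would always be true for size_type, so the loop tests i > 0 and decrements inside the call. A minimal sketch of the idiom:

    #include <cstddef>
    #include <iostream>

    int main ()
    {
      typedef std::size_t size_type;
      const size_type n = 4;

      for (size_type i=0; i<n; ++i)    // forward substitution order: 0 1 2 3
        std::cout << i << ' ';
      std::cout << std::endl;

      for (size_type i=n; i>0;)        // backward order without underflow: 3 2 1 0
        std::cout << --i << ' ';
      std::cout << std::endl;
    }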
template <class SparsityPatternBase>
BlockSparsityPatternBase<SparsityPatternBase>::
-BlockSparsityPatternBase (const unsigned int n_block_rows,
- const unsigned int n_block_columns)
+BlockSparsityPatternBase (const size_type n_block_rows,
+ const size_type n_block_columns)
:
rows (0),
columns (0)
template <class SparsityPatternBase>
void
BlockSparsityPatternBase<SparsityPatternBase>::
-reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns)
+reinit (const size_type n_block_rows,
+ const size_type n_block_columns)
{
// delete previous content and
// clean the sub_objects array
// completely
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
{
SparsityPatternBase *sp = sub_objects[i][j];
sub_objects[i][j] = 0;
sub_objects.reinit (rows, columns);
// allocate new objects
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
{
SparsityPatternBase *p = new SparsityPatternBase;
sub_objects[i][j] = p;
Assert (rows == bsp.rows, ExcDimensionMismatch(rows, bsp.rows));
Assert (columns == bsp.columns, ExcDimensionMismatch(columns, bsp.columns));
// copy objects
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
*sub_objects[i][j] = *bsp.sub_objects[i][j];
// update index objects
collect_sizes ();
void
BlockSparsityPatternBase<SparsityPatternBase>::collect_sizes ()
{
- std::vector<unsigned int> row_sizes (rows);
- std::vector<unsigned int> col_sizes (columns);
+ std::vector<size_type> row_sizes (rows);
+ std::vector<size_type> col_sizes (columns);
// first find out the row sizes
// from the first block column
- for (unsigned int r=0; r<rows; ++r)
+ for (size_type r=0; r<rows; ++r)
row_sizes[r] = sub_objects[r][0]->n_rows();
// then check that the following
// block columns have the same
// sizes
- for (unsigned int c=1; c<columns; ++c)
- for (unsigned int r=0; r<rows; ++r)
+ for (size_type c=1; c<columns; ++c)
+ for (size_type r=0; r<rows; ++r)
Assert (row_sizes[r] == sub_objects[r][c]->n_rows(),
ExcIncompatibleRowNumbers (r,0,r,c));
// then do the same with the columns
- for (unsigned int c=0; c<columns; ++c)
+ for (size_type c=0; c<columns; ++c)
col_sizes[c] = sub_objects[0][c]->n_cols();
- for (unsigned int r=1; r<rows; ++r)
- for (unsigned int c=0; c<columns; ++c)
+ for (size_type r=1; r<rows; ++r)
+ for (size_type c=0; c<columns; ++c)
Assert (col_sizes[c] == sub_objects[r][c]->n_cols(),
ExcIncompatibleRowNumbers (0,c,r,c));
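A self-contained sketch of what collect_sizes() verifies above: every block in a block row must report the same number of rows, and every block in a block column the same number of columns. Block, check_block_sizes, and the assert-based checks are illustrative stand-ins:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef std::size_t size_type;
    struct Block { size_type n_rows, n_cols; };

    void check_block_sizes (const std::vector<std::vector<Block> > &blocks)
    {
      const size_type rows    = blocks.size();
      const size_type columns = blocks[0].size();

      for (size_type c=1; c<columns; ++c)     // row sizes: compare against column 0
        for (size_type r=0; r<rows; ++r)
          assert (blocks[r][c].n_rows == blocks[r][0].n_rows);

      for (size_type r=1; r<rows; ++r)        // column sizes: compare against row 0
        for (size_type c=0; c<columns; ++c)
          assert (blocks[r][c].n_cols == blocks[0][c].n_cols);
    }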
void
BlockSparsityPatternBase<SparsityPatternBase>::compress ()
{
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
sub_objects[i][j]->compress ();
}
bool
BlockSparsityPatternBase<SparsityPatternBase>::empty () const
{
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
if (sub_objects[i][j]->empty () == false)
return false;
return true;
template <class SparsityPatternBase>
-unsigned int
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::max_entries_per_row () const
{
- unsigned int max_entries = 0;
- for (unsigned int block_row=0; block_row<rows; ++block_row)
+ size_type max_entries = 0;
+ for (size_type block_row=0; block_row<rows; ++block_row)
{
- unsigned int this_row = 0;
- for (unsigned int c=0; c<columns; ++c)
+ size_type this_row = 0;
+ for (size_type c=0; c<columns; ++c)
this_row += sub_objects[block_row][c]->max_entries_per_row ();
if (this_row > max_entries)
template <class SparsityPatternBase>
-types::global_dof_index
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::n_rows () const
{
// only count in first column, since
// all rows should be equivalent
- types::global_dof_index count = 0;
- for (unsigned int r=0; r<rows; ++r)
+ size_type count = 0;
+ for (size_type r=0; r<rows; ++r)
count += sub_objects[r][0]->n_rows();
return count;
}
template <class SparsityPatternBase>
-types::global_dof_index
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::n_cols () const
{
// only count in first row, since
// all rows should be equivalent
- types::global_dof_index count = 0;
- for (unsigned int c=0; c<columns; ++c)
+ size_type count = 0;
+ for (size_type c=0; c<columns; ++c)
count += sub_objects[0][c]->n_cols();
return count;
}
template <class SparsityPatternBase>
-unsigned int
+typename BlockSparsityPatternBase<SparsityPatternBase>::size_type
BlockSparsityPatternBase<SparsityPatternBase>::n_nonzero_elements () const
{
- unsigned int count = 0;
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ size_type count = 0;
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
count += sub_objects[i][j]->n_nonzero_elements ();
return count;
}
void
BlockSparsityPatternBase<SparsityPatternBase>::print(std::ostream &out) const
{
- unsigned int k=0;
- for (unsigned int ib=0; ib<n_block_rows(); ++ib)
+ size_type k=0;
+ for (size_type ib=0; ib<n_block_rows(); ++ib)
{
- for (unsigned int i=0; i<block(ib,0).n_rows(); ++i)
+ for (size_type i=0; i<block(ib,0).n_rows(); ++i)
{
out << '[' << i+k;
- unsigned int l=0;
- for (unsigned int jb=0; jb<n_block_cols(); ++jb)
+ size_type l=0;
+ for (size_type jb=0; jb<n_block_cols(); ++jb)
{
const SparsityPatternBase &b = block(ib,jb);
- for (unsigned int j=0; j<b.n_cols(); ++j)
+ for (size_type j=0; j<b.n_cols(); ++j)
if (b.exists(i,j))
out << ',' << l+j;
l += b.n_cols();
void
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::print(std::ostream &out) const
{
- unsigned int k=0;
- for (unsigned int ib=0; ib<n_block_rows(); ++ib)
+ size_type k=0;
+ for (size_type ib=0; ib<n_block_rows(); ++ib)
{
- for (unsigned int i=0; i<block(ib,0).n_rows(); ++i)
+ for (size_type i=0; i<block(ib,0).n_rows(); ++i)
{
out << '[' << i+k;
- unsigned int l=0;
- for (unsigned int jb=0; jb<n_block_cols(); ++jb)
+ size_type l=0;
+ for (size_type jb=0; jb<n_block_cols(); ++jb)
{
const CompressedSimpleSparsityPattern &b = block(ib,jb);
if (b.row_index_set().size()==0 || b.row_index_set().is_element(i))
- for (unsigned int j=0; j<b.n_cols(); ++j)
+ for (size_type j=0; j<b.n_cols(); ++j)
if (b.exists(i,j))
out << ',' << l+j;
l += b.n_cols();
void
BlockSparsityPatternBase<SparsityPatternBase>::print_gnuplot(std::ostream &out) const
{
- unsigned int k=0;
- for (unsigned int ib=0; ib<n_block_rows(); ++ib)
+ size_type k=0;
+ for (size_type ib=0; ib<n_block_rows(); ++ib)
{
- for (unsigned int i=0; i<block(ib,0).n_rows(); ++i)
+ for (size_type i=0; i<block(ib,0).n_rows(); ++i)
{
- unsigned int l=0;
- for (unsigned int jb=0; jb<n_block_cols(); ++jb)
+ size_type l=0;
+ for (size_type jb=0; jb<n_block_cols(); ++jb)
{
const SparsityPatternBase &b = block(ib,jb);
- for (unsigned int j=0; j<b.n_cols(); ++j)
+ for (size_type j=0; j<b.n_cols(); ++j)
if (b.exists(i,j))
out << l+j << " " << -static_cast<signed int>(i+k) << std::endl;
l += b.n_cols();
-BlockSparsityPattern::BlockSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns)
+BlockSparsityPattern::BlockSparsityPattern (const size_type n_rows,
+ const size_type n_columns)
:
BlockSparsityPatternBase<SparsityPattern>(n_rows,
n_columns)
BlockSparsityPattern::reinit(
const BlockIndices &rows,
const BlockIndices &cols,
- const std::vector<std::vector<types::global_dof_index> > &row_lengths)
+ const std::vector<std::vector<size_type> > &row_lengths)
{
AssertDimension (row_lengths.size(), cols.size());
this->reinit(rows.size(), cols.size());
- for (unsigned int j=0; j<cols.size(); ++j)
- for (unsigned int i=0; i<rows.size(); ++i)
+ for (size_type j=0; j<cols.size(); ++j)
+ for (size_type i=0; i<rows.size(); ++i)
{
- const unsigned int start = rows.local_to_global(i, 0);
- const unsigned int length = rows.block_size(i);
+ const size_type start = rows.local_to_global(i, 0);
+ const size_type length = rows.block_size(i);
if (row_lengths[j].size()==1)
block(i,j).reinit(rows.block_size(i),
cols.block_size(j), row_lengths[j][0]);
else
{
- VectorSlice<const std::vector<unsigned int> >
+ VectorSlice<const std::vector<size_type> >
block_rows(row_lengths[j], start, length);
block(i,j).reinit(rows.block_size(i),
cols.block_size(j),
bool
BlockSparsityPattern::is_compressed () const
{
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<columns; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
if (sub_objects[i][j]->is_compressed () == false)
return false;
return true;
MemoryConsumption::memory_consumption (sub_objects) +
MemoryConsumption::memory_consumption (row_indices) +
MemoryConsumption::memory_consumption (column_indices));
- for (unsigned int r=0; r<rows; ++r)
- for (unsigned int c=0; c<columns; ++c)
+ for (size_type r=0; r<rows; ++r)
+ for (size_type c=0; c<columns; ++c)
mem += MemoryConsumption::memory_consumption (*sub_objects[r][c]);
return mem;
reinit (csp.n_block_rows(), csp.n_block_cols());
// copy over blocks
- for (unsigned int i=0; i<n_block_rows(); ++i)
- for (unsigned int j=0; j<n_block_cols(); ++j)
+ for (size_type i=0; i<n_block_rows(); ++i)
+ for (size_type j=0; j<n_block_cols(); ++j)
block(i,j).copy_from (csp.block(i,j));
// and finally enquire their new
reinit (csp.n_block_rows(), csp.n_block_cols());
// copy over blocks
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<rows; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
block(i,j).copy_from (csp.block(i,j));
// and finally enquire their new
reinit (csp.n_block_rows(), csp.n_block_cols());
// copy over blocks
- for (unsigned int i=0; i<rows; ++i)
- for (unsigned int j=0; j<rows; ++j)
+ for (size_type i=0; i<rows; ++i)
+ for (size_type j=0; j<columns; ++j)
block(i,j).copy_from (csp.block(i,j));
// and finally enquire their new
BlockCompressedSparsityPattern::
BlockCompressedSparsityPattern (
- const unsigned int n_rows,
- const unsigned int n_columns)
+ const size_type n_rows,
+ const size_type n_columns)
:
BlockSparsityPatternBase<CompressedSparsityPattern>(n_rows,
n_columns)
BlockCompressedSparsityPattern::
BlockCompressedSparsityPattern (
- const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices)
+ const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices)
{
reinit(row_indices, col_indices);
}
void
BlockCompressedSparsityPattern::reinit (
- const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes)
+ const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes)
{
BlockSparsityPatternBase<CompressedSparsityPattern>::reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0; i<row_block_sizes.size(); ++i)
- for (unsigned int j=0; j<col_block_sizes.size(); ++j)
+ for (size_type i=0; i<row_block_sizes.size(); ++i)
+ for (size_type j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
{
BlockSparsityPatternBase<CompressedSparsityPattern>::reinit(row_indices.size(),
col_indices.size());
- for (unsigned int i=0; i<row_indices.size(); ++i)
- for (unsigned int j=0; j<col_indices.size(); ++j)
+ for (size_type i=0; i<row_indices.size(); ++i)
+ for (size_type j=0; j<col_indices.size(); ++j)
this->block(i,j).reinit(row_indices.block_size(i),
col_indices.block_size(j));
this->collect_sizes();
BlockCompressedSetSparsityPattern::
BlockCompressedSetSparsityPattern (
- const unsigned int n_rows,
- const unsigned int n_columns)
+ const size_type n_rows,
+ const size_type n_columns)
:
BlockSparsityPatternBase<CompressedSetSparsityPattern>(n_rows,
n_columns)
BlockCompressedSetSparsityPattern::
BlockCompressedSetSparsityPattern (
- const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices)
+ const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices)
{
reinit(row_indices, col_indices);
}
void
BlockCompressedSetSparsityPattern::reinit (
- const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes)
+ const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes)
{
BlockSparsityPatternBase<CompressedSetSparsityPattern>::reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0; i<row_block_sizes.size(); ++i)
- for (unsigned int j=0; j<col_block_sizes.size(); ++j)
+ for (size_type i=0; i<row_block_sizes.size(); ++i)
+ for (size_type j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
{
BlockSparsityPatternBase<CompressedSetSparsityPattern>::reinit(row_indices.size(),
col_indices.size());
- for (unsigned int i=0; i<row_indices.size(); ++i)
- for (unsigned int j=0; j<col_indices.size(); ++j)
+ for (size_type i=0; i<row_indices.size(); ++i)
+ for (size_type j=0; j<col_indices.size(); ++j)
this->block(i,j).reinit(row_indices.block_size(i),
col_indices.block_size(j));
this->collect_sizes();
BlockCompressedSimpleSparsityPattern::
-BlockCompressedSimpleSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns)
+BlockCompressedSimpleSparsityPattern (const size_type n_rows,
+ const size_type n_columns)
:
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(n_rows,
n_columns)
BlockCompressedSimpleSparsityPattern::
-BlockCompressedSimpleSparsityPattern (const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices)
+BlockCompressedSimpleSparsityPattern (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices)
:
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(row_indices.size(),
col_indices.size())
{
- for (unsigned int i=0; i<row_indices.size(); ++i)
- for (unsigned int j=0; j<col_indices.size(); ++j)
+ for (size_type i=0; i<row_indices.size(); ++i)
+ for (size_type j=0; j<col_indices.size(); ++j)
this->block(i,j).reinit(row_indices[i],col_indices[j]);
this->collect_sizes();
}
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(partitioning.size(),
partitioning.size())
{
- for (unsigned int i=0; i<partitioning.size(); ++i)
- for (unsigned int j=0; j<partitioning.size(); ++j)
+ for (size_type i=0; i<partitioning.size(); ++i)
+ for (size_type j=0; j<partitioning.size(); ++j)
this->block(i,j).reinit(partitioning[i].size(),
partitioning[j].size(),
partitioning[i]);
void
BlockCompressedSimpleSparsityPattern::reinit (
- const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes)
+ const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes)
{
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::
reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0; i<row_block_sizes.size(); ++i)
- for (unsigned int j=0; j<col_block_sizes.size(); ++j)
+ for (size_type i=0; i<row_block_sizes.size(); ++i)
+ for (size_type j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
{
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::
reinit(partitioning.size(), partitioning.size());
- for (unsigned int i=0; i<partitioning.size(); ++i)
- for (unsigned int j=0; j<partitioning.size(); ++j)
+ for (size_type i=0; i<partitioning.size(); ++i)
+ for (size_type j=0; j<partitioning.size(); ++j)
this->block(i,j).reinit(partitioning[i].size(),
partitioning[j].size(),
partitioning[i]);
BlockSparsityPattern::
- BlockSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns)
+ BlockSparsityPattern (const size_type n_rows,
+ const size_type n_columns)
:
dealii::BlockSparsityPatternBase<SparsityPattern>(n_rows,
n_columns)
BlockSparsityPattern::
- BlockSparsityPattern (const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices)
+ BlockSparsityPattern (const std::vector<size_type> &row_indices,
+ const std::vector<size_type> &col_indices)
:
BlockSparsityPatternBase<SparsityPattern>(row_indices.size(),
col_indices.size())
{
- for (unsigned int i=0; i<row_indices.size(); ++i)
- for (unsigned int j=0; j<col_indices.size(); ++j)
+ for (size_type i=0; i<row_indices.size(); ++i)
+ for (size_type j=0; j<col_indices.size(); ++j)
this->block(i,j).reinit(row_indices[i],col_indices[j]);
this->collect_sizes();
}
(parallel_partitioning.size(),
parallel_partitioning.size())
{
- for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
- for (unsigned int j=0; j<parallel_partitioning.size(); ++j)
+ for (size_type i=0; i<parallel_partitioning.size(); ++i)
+ for (size_type j=0; j<parallel_partitioning.size(); ++j)
this->block(i,j).reinit(parallel_partitioning[i],
parallel_partitioning[j]);
this->collect_sizes();
(parallel_partitioning.size(),
parallel_partitioning.size())
{
- for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
- for (unsigned int j=0; j<parallel_partitioning.size(); ++j)
+ for (size_type i=0; i<parallel_partitioning.size(); ++i)
+ for (size_type j=0; j<parallel_partitioning.size(); ++j)
this->block(i,j).reinit(parallel_partitioning[i],
parallel_partitioning[j],
communicator);
void
- BlockSparsityPattern::reinit (const std::vector<types::global_dof_index> &row_block_sizes,
- const std::vector<types::global_dof_index> &col_block_sizes)
+ BlockSparsityPattern::reinit (const std::vector<size_type> &row_block_sizes,
+ const std::vector<size_type> &col_block_sizes)
{
dealii::BlockSparsityPatternBase<SparsityPattern>::
reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0; i<row_block_sizes.size(); ++i)
- for (unsigned int j=0; j<col_block_sizes.size(); ++j)
+ for (size_type i=0; i<row_block_sizes.size(); ++i)
+ for (size_type j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
dealii::BlockSparsityPatternBase<SparsityPattern>::
reinit(parallel_partitioning.size(),
parallel_partitioning.size());
- for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
- for (unsigned int j=0; j<parallel_partitioning.size(); ++j)
+ for (size_type i=0; i<parallel_partitioning.size(); ++i)
+ for (size_type j=0; j<parallel_partitioning.size(); ++j)
this->block(i,j).reinit(parallel_partitioning[i],
parallel_partitioning[j]);
this->collect_sizes();
dealii::BlockSparsityPatternBase<SparsityPattern>::
reinit(parallel_partitioning.size(),
parallel_partitioning.size());
- for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
- for (unsigned int j=0; j<parallel_partitioning.size(); ++j)
+ for (size_type i=0; i<parallel_partitioning.size(); ++i)
+ for (size_type j=0; j<parallel_partitioning.size(); ++j)
this->block(i,j).reinit(parallel_partitioning[i],
parallel_partitioning[j],
communicator);
-ChunkSparsityPattern::ChunkSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const unsigned int max_per_row,
- const unsigned int chunk_size,
+ChunkSparsityPattern::ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size,
const bool)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
}
-ChunkSparsityPattern::ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size)
+ChunkSparsityPattern::ChunkSparsityPattern (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
ChunkSparsityPattern::ChunkSparsityPattern (
- const types::global_dof_index m,
- const types::global_dof_index n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
+ const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
const bool)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
ChunkSparsityPattern::ChunkSparsityPattern (
- const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size)
+ const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
-ChunkSparsityPattern::ChunkSparsityPattern (const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size)
+ChunkSparsityPattern::ChunkSparsityPattern (const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size)
{
reinit (n, n, max_per_row, chunk_size);
}
ChunkSparsityPattern::ChunkSparsityPattern (
- const types::global_dof_index m,
+ const size_type m,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
const bool)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
ChunkSparsityPattern::ChunkSparsityPattern (
- const unsigned int m,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size)
+ const size_type m,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
void
-ChunkSparsityPattern::reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const unsigned int max_per_row,
- const unsigned int chunk_size,
+ChunkSparsityPattern::reinit (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size,
const bool)
{
reinit (m, n, max_per_row, chunk_size);
void
-ChunkSparsityPattern::reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size)
+ChunkSparsityPattern::reinit (const size_type m,
+ const size_type n,
+ const size_type max_per_row,
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
// simply map this function to the
// other @p{reinit} function
- const std::vector<unsigned int> row_lengths (m, max_per_row);
+ const std::vector<size_type> row_lengths (m, max_per_row);
reinit (m, n, row_lengths, chunk_size);
}
ChunkSparsityPattern::reinit (
- const types::global_dof_index m,
- const types::global_dof_index n,
+ const size_type m,
+ const size_type n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const unsigned int chunk_size,
+ const VectorSlice<const std::vector<size_type> > &row_lengths,
+ const size_type chunk_size,
const bool)
{
reinit (m, n, row_lengths, chunk_size);
void
ChunkSparsityPattern::reinit (
- const unsigned int m,
- const unsigned int n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const unsigned int chunk_size)
+ const size_type m,
+ const size_type n,
+ const VectorSlice<const std::vector<size_type> > &row_lengths,
+ const size_type chunk_size)
{
Assert (row_lengths.size() == m, ExcInvalidNumber (m));
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
// (n/chunk_size). rounding up in integer
// arithmetic equals
// ((m+chunk_size-1)/chunk_size):
- const unsigned int m_chunks = (m+chunk_size-1) / chunk_size,
- n_chunks = (n+chunk_size-1) / chunk_size;
+ const size_type m_chunks = (m+chunk_size-1) / chunk_size,
+ n_chunks = (n+chunk_size-1) / chunk_size;
// compute the maximum number of chunks in
// each row. the passed array denotes the
// row zero at columns {0,2} and for row
// one at {4,6} --> we'll need 4 chunks for
// the first chunk row!) :
- std::vector<unsigned int> chunk_row_lengths (m_chunks, 0);
- for (unsigned int i=0; i<m; ++i)
+ std::vector<size_type> chunk_row_lengths (m_chunks, 0);
+ for (size_type i=0; i<m; ++i)
chunk_row_lengths[i/chunk_size] += row_lengths[i];
sparsity_pattern.reinit (m_chunks,
template <typename SparsityType>
void
ChunkSparsityPattern::copy_from (const SparsityType &csp,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool)
{
copy_from (csp, chunk_size);
// and SparsityPattern that uses begin() as iterator type
template <typename Sparsity>
void copy_row (const Sparsity &csp,
- const unsigned int row,
+ const ChunkSparsityPattern::size_type row,
ChunkSparsityPattern &dst)
{
typename Sparsity::row_iterator col_num = csp.row_begin (row);
}
void copy_row (const SparsityPattern &csp,
- const unsigned int row,
+ const ChunkSparsityPattern::size_type row,
ChunkSparsityPattern &dst)
{
SparsityPattern::iterator col_num = csp.begin (row);
template <typename SparsityType>
void
ChunkSparsityPattern::copy_from (const SparsityType &csp,
- const unsigned int chunk_size)
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
// count number of entries per row, then
// initialize the underlying sparsity
// pattern
- std::vector<unsigned int> entries_per_row (csp.n_rows(), 0);
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
+ std::vector<size_type> entries_per_row (csp.n_rows(), 0);
+ for (size_type row = 0; row<csp.n_rows(); ++row)
entries_per_row[row] = csp.row_length(row);
reinit (csp.n_rows(), csp.n_cols(),
chunk_size);
// then actually fill it
- for (unsigned int row = 0; row<csp.n_rows(); ++row)
+ for (size_type row = 0; row<csp.n_rows(); ++row)
internal::copy_row(csp, row, *this);
// finally compress
template <typename number>
void ChunkSparsityPattern::copy_from (const FullMatrix<number> &matrix,
- const unsigned int chunk_size,
+ const size_type chunk_size,
const bool)
{
copy_from (matrix, chunk_size);
template <typename number>
void ChunkSparsityPattern::copy_from (const FullMatrix<number> &matrix,
- const unsigned int chunk_size)
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
// pattern. remember to also allocate space for the diagonal entry (if that
// hasn't happened yet) if m==n since we always allocate that for diagonal
// matrices
- std::vector<unsigned int> entries_per_row (matrix.m(), 0);
- for (unsigned int row=0; row<matrix.m(); ++row)
+ std::vector<size_type> entries_per_row (matrix.m(), 0);
+ for (size_type row=0; row<matrix.m(); ++row)
{
- for (unsigned int col=0; col<matrix.n(); ++col)
+ for (size_type col=0; col<matrix.n(); ++col)
if (matrix(row,col) != 0)
++entries_per_row[row];
chunk_size);
// then actually fill it
- for (unsigned int row=0; row<matrix.m(); ++row)
- for (unsigned int col=0; col<matrix.n(); ++col)
+ for (size_type row=0; row<matrix.m(); ++row)
+ for (size_type col=0; col<matrix.n(); ++col)
if (matrix(row,col) != 0)
add (row,col);
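The two passes above (count the nonzeros per row, then fill) let the pattern allocate its storage exactly once. A plain-STL sketch of the same scheme, with the pattern reduced to a vector of column lists and pattern_from_dense an illustrative name:

    #include <cstddef>
    #include <vector>

    typedef std::size_t size_type;

    std::vector<std::vector<size_type> >
    pattern_from_dense (const std::vector<std::vector<double> > &matrix)
    {
      const size_type m = matrix.size();
      std::vector<std::vector<size_type> > pattern (m);

      for (size_type row=0; row<m; ++row)
        {
          size_type nonzeros = 0;                       // pass 1: count
          for (size_type col=0; col<matrix[row].size(); ++col)
            if (matrix[row][col] != 0)
              ++nonzeros;

          pattern[row].reserve (nonzeros);              // allocate exactly once
          for (size_type col=0; col<matrix[row].size(); ++col)
            if (matrix[row][col] != 0)                  // pass 2: fill
              pattern[row].push_back (col);
        }
      return pattern;
    }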
void
ChunkSparsityPattern::reinit (
- const types::global_dof_index m,
- const types::global_dof_index n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
+ const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size,
const bool)
{
reinit (m, n, row_lengths, chunk_size);
void
ChunkSparsityPattern::reinit (
- const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size)
+ const size_type m,
+ const size_type n,
+ const std::vector<size_type> &row_lengths,
+ const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
-unsigned int
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::max_entries_per_row () const
{
return sparsity_pattern.max_entries_per_row() * chunk_size;
void
-ChunkSparsityPattern::add (const types::global_dof_index i,
- const types::global_dof_index j)
+ChunkSparsityPattern::add (const size_type i,
+ const size_type j)
{
Assert (i<rows, ExcInvalidIndex(i,rows));
Assert (j<cols, ExcInvalidIndex(j,cols));
bool
-ChunkSparsityPattern::exists (const types::global_dof_index i,
- const types::global_dof_index j) const
+ChunkSparsityPattern::exists (const size_type i,
+ const size_type j) const
{
Assert (i<rows, ExcIndexRange(i,0,rows));
Assert (j<cols, ExcIndexRange(j,0,cols));
-unsigned int
-ChunkSparsityPattern::row_length (const types::global_dof_index i) const
+ChunkSparsityPattern::size_type
+ChunkSparsityPattern::row_length (const size_type i) const
{
Assert (i<rows, ExcIndexRange(i,0,rows));
-unsigned int
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::n_nonzero_elements () const
{
if ((n_rows() % chunk_size == 0)
{
// columns align with chunks, but
// not rows
- unsigned int n = sparsity_pattern.n_nonzero_elements() *
- chunk_size *
- chunk_size;
+ size_type n = sparsity_pattern.n_nonzero_elements() *
+ chunk_size *
+ chunk_size;
n -= (sparsity_pattern.n_rows() * chunk_size - n_rows()) *
sparsity_pattern.row_length(sparsity_pattern.n_rows()-1) *
chunk_size;
// see what this leads to. follow the advice in the documentation of
// the sparsity pattern iterators to do the loop over individual rows,
// rather than all elements
- unsigned int n = 0;
+ size_type n = 0;
- for (unsigned int row = 0; row < sparsity_pattern.n_rows(); ++row)
+ for (size_type row = 0; row < sparsity_pattern.n_rows(); ++row)
{
SparsityPattern::const_iterator p = sparsity_pattern.begin(row);
for (; p!=sparsity_pattern.end(row); ++p)
AssertThrow (out, ExcIO());
- for (unsigned int i=0; i<sparsity_pattern.rows; ++i)
- for (unsigned int d=0;
+ for (size_type i=0; i<sparsity_pattern.rows; ++i)
+ for (size_type d=0;
(d<chunk_size) && (i*chunk_size + d < n_rows());
++d)
{
out << '[' << i *chunk_size+d;
- for (unsigned int j=sparsity_pattern.rowstart[i];
+ for (size_type j=sparsity_pattern.rowstart[i];
j<sparsity_pattern.rowstart[i+1]; ++j)
if (sparsity_pattern.colnums[j] != sparsity_pattern.invalid_entry)
- for (unsigned int e=0;
+ for (size_type e=0;
((e<chunk_size) &&
(sparsity_pattern.colnums[j]*chunk_size + e < n_cols()));
++e)
// for each entry in the underlying
// sparsity pattern, repeat everything
// chunk_size x chunk_size times
- for (unsigned int i=0; i<sparsity_pattern.rows; ++i)
- for (unsigned int j=sparsity_pattern.rowstart[i];
+ for (size_type i=0; i<sparsity_pattern.rows; ++i)
+ for (size_type j=sparsity_pattern.rowstart[i];
j<sparsity_pattern.rowstart[i+1]; ++j)
if (sparsity_pattern.colnums[j] != sparsity_pattern.invalid_entry)
- for (unsigned int d=0;
+ for (size_type d=0;
((d<chunk_size) &&
(sparsity_pattern.colnums[j]*chunk_size+d < n_cols()));
++d)
- for (unsigned int e=0;
+ for (size_type e=0;
(e<chunk_size) && (i*chunk_size + e < n_rows());
++e)
// while matrix entries are
-unsigned int
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::bandwidth () const
{
// calculate the bandwidth from that of the
// explicit instantiations
template
void ChunkSparsityPattern::copy_from<CompressedSparsityPattern> (const CompressedSparsityPattern &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<CompressedSetSparsityPattern> (const CompressedSetSparsityPattern &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<CompressedSimpleSparsityPattern> (const CompressedSimpleSparsityPattern &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<float> (const FullMatrix<float> &,
- const unsigned int,
+ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<double> (const FullMatrix<double> &,
- const unsigned int,
+ const size_type,
const bool);
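Throughout the ChunkSparsityPattern code above, sizes are rounded up to whole chunks with (m + chunk_size - 1) / chunk_size. A small, standalone worked check of that identity:

    #include <cassert>
    #include <cstddef>

    int main ()
    {
      typedef std::size_t size_type;
      const size_type chunk_size = 4;

      assert ((size_type(9) + chunk_size - 1) / chunk_size == 3);  // ceil(9/4)
      assert ((size_type(8) + chunk_size - 1) / chunk_size == 2);  // exact fit
      // the padding rows/columns of the last partial chunk are what
      // ChunkSparsityPattern::n_nonzero_elements() above subtracts again
    }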
DEAL_II_NAMESPACE_CLOSE
-CompressedSetSparsityPattern::CompressedSetSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n)
+CompressedSetSparsityPattern::CompressedSetSparsityPattern (const size_type m,
+ const size_type n)
:
rows(0),
cols(0)
-CompressedSetSparsityPattern::CompressedSetSparsityPattern (const types::global_dof_index n)
+CompressedSetSparsityPattern::CompressedSetSparsityPattern (const size_type n)
:
rows(0),
cols(0)
void
-CompressedSetSparsityPattern::reinit (const types::global_dof_index m,
- const types::global_dof_index n)
+CompressedSetSparsityPattern::reinit (const size_type m,
+ const size_type n)
{
rows = m;
cols = n;
-unsigned int
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::max_entries_per_row () const
{
- unsigned int m = 0;
- for (unsigned int i=0; i<rows; ++i)
+ size_type m = 0;
+ for (size_type i=0; i<rows; ++i)
{
- m = std::max (m, static_cast<unsigned int>(lines[i].entries.size()));
+ m = std::max (m, static_cast<size_type>(lines[i].entries.size()));
}
return m;
bool
-CompressedSetSparsityPattern::exists (const types::global_dof_index i,
- const types::global_dof_index j) const
+CompressedSetSparsityPattern::exists (const size_type i,
+ const size_type j) const
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
// 2. that the @p{add} function can
// be called on elements that
// already exist without any harm
- for (unsigned int row=0; row<rows; ++row)
+ for (size_type row=0; row<rows; ++row)
{
- for (std::set<unsigned int>::const_iterator
+ for (std::set<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end();
++j)
{
AssertThrow (out, ExcIO());
- for (unsigned int row=0; row<rows; ++row)
+ for (size_type row=0; row<rows; ++row)
{
out << '[' << row;
- for (std::set<unsigned int>::const_iterator
+ for (std::set<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
out << ',' << *j;
{
AssertThrow (out, ExcIO());
- for (unsigned int row=0; row<rows; ++row)
+ for (size_type row=0; row<rows; ++row)
{
- for (std::set<unsigned int>::const_iterator
+ for (std::set<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
// while matrix entries are usually
-unsigned int
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::bandwidth () const
{
- unsigned int b=0;
- for (unsigned int row=0; row<rows; ++row)
+ size_type b=0;
+ for (size_type row=0; row<rows; ++row)
{
- for (std::set<unsigned int>::const_iterator
+ for (std::set<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
- if (static_cast<unsigned int>(std::abs(static_cast<int>(row-*j))) > b)
+ if (static_cast<size_type>(std::abs(static_cast<int>(row-*j))) > b)
b = std::abs(static_cast<signed int>(row-*j));
}
-unsigned int
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::n_nonzero_elements () const
{
- unsigned int n=0;
- for (unsigned int i=0; i<rows; ++i)
+ size_type n=0;
+ for (size_type i=0; i<rows; ++i)
{
n += lines[i].entries.size();
}
if (n_elements <= 0)
return;
- const unsigned int stop_size = entries.size() + n_elements;
+ const size_type stop_size = entries.size() + n_elements;
if (indices_are_sorted == true && n_elements > 3)
{
// first entry is a duplicate before
// actually doing something.
ForwardIterator my_it = begin;
- unsigned int col = *my_it;
- std::vector<unsigned int>::iterator it =
+ size_type col = *my_it;
+ std::vector<size_type>::iterator it =
Utilities::lower_bound(entries.begin(), entries.end(), col);
while (*it == col)
{
// resize vector by just inserting the
// list
- const unsigned int pos1 = it - entries.begin();
+ const size_type pos1 = it - entries.begin();
Assert (pos1 <= entries.size(), ExcInternalError());
entries.insert (it, my_it, end);
it = entries.begin() + pos1;
- Assert (entries.size() >= (unsigned int)(it-entries.begin()), ExcInternalError());
+ Assert (entries.size() >= (size_type)(it-entries.begin()), ExcInternalError());
// now merge the two lists.
- std::vector<unsigned int>::iterator it2 = it + (end-my_it);
+ std::vector<size_type>::iterator it2 = it + (end-my_it);
// as long as there are indices both in
// the end of the entries list and in the
*it++ = *it2++;
// resize and return
- const unsigned int new_size = it - entries.begin();
+ const size_type new_size = it - entries.begin();
Assert (new_size <= stop_size, ExcInternalError());
entries.resize (new_size);
return;
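The sorted fast path above appends the incoming run and then merges the two sorted runs by hand. A compact sketch of the same idea using standard algorithms (add_sorted_entries is an illustrative helper, and [begin, end) must already be sorted, as the fast path assumes):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    typedef std::size_t size_type;

    void add_sorted_entries (std::vector<size_type> &entries,
                             const size_type *begin,
                             const size_type *end)
    {
      const std::size_t middle = entries.size();
      entries.insert (entries.end(), begin, end);     // append the sorted run
      std::inplace_merge (entries.begin(),
                          entries.begin() + middle,
                          entries.end());             // restore global order
      entries.erase (std::unique (entries.begin(), entries.end()),
                     entries.end());                  // drop duplicate indices
    }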
if (stop_size > entries.capacity())
entries.reserve (stop_size);
- unsigned int col = *my_it;
- std::vector<unsigned int>::iterator it, it2;
+ size_type col = *my_it;
+ std::vector<size_type>::iterator it, it2;
// insert the first element as for one
// entry only first check the last
// element (or if line is still empty)
std::size_t
CompressedSimpleSparsityPattern::Line::memory_consumption () const
{
- return entries.capacity()*sizeof(unsigned int)+sizeof(Line);
+ return entries.capacity()*sizeof(size_type)+sizeof(Line);
}
-CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
+CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const size_type m,
+ const size_type n,
const IndexSet &rowset_
)
:
-CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const types::global_dof_index n)
+CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const size_type n)
:
rows(0),
cols(0),
void
-CompressedSimpleSparsityPattern::reinit (const types::global_dof_index m,
- const types::global_dof_index n,
+CompressedSimpleSparsityPattern::reinit (const size_type m,
+ const size_type n,
const IndexSet &rowset_)
{
rows = m;
-unsigned int
+CompressedSimpleSparsityPattern::size_type
CompressedSimpleSparsityPattern::max_entries_per_row () const
{
- unsigned int m = 0;
- for (unsigned int i=0; i<lines.size(); ++i)
+ size_type m = 0;
+ for (size_type i=0; i<lines.size(); ++i)
{
- m = std::max (m, static_cast<unsigned int>(lines[i].entries.size()));
+ m = std::max (m, static_cast<size_type>(lines[i].entries.size()));
}
return m;
bool
-CompressedSimpleSparsityPattern::exists (const types::global_dof_index i,
- const types::global_dof_index j) const
+CompressedSimpleSparsityPattern::exists (const size_type i,
+ const size_type j) const
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
Assert( rowset.size()==0 || rowset.is_element(i), ExcInternalError());
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? i : rowset.index_within_set(i);
return std::binary_search (lines[rowindex].entries.begin(),
// 2. that the @p{add} function can
// be called on elements that
// already exist without any harm
- for (unsigned int row=0; row<lines.size(); ++row)
+ for (size_type row=0; row<lines.size(); ++row)
{
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? row : rowset.nth_index_in_set(row);
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end();
++j)
void
CompressedSimpleSparsityPattern::print (std::ostream &out) const
{
- for (unsigned int row=0; row<lines.size(); ++row)
+ for (size_type row=0; row<lines.size(); ++row)
{
out << '[' << (rowset.size()==0 ? row : rowset.nth_index_in_set(row));
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
out << ',' << *j;
void
CompressedSimpleSparsityPattern::print_gnuplot (std::ostream &out) const
{
- for (unsigned int row=0; row<lines.size(); ++row)
+ for (size_type row=0; row<lines.size(); ++row)
{
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? row : rowset.nth_index_in_set(row);
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
// while matrix entries are usually
-unsigned int
+CompressedSimpleSparsityPattern::size_type
CompressedSimpleSparsityPattern::bandwidth () const
{
- unsigned int b=0;
- for (unsigned int row=0; row<lines.size(); ++row)
+ size_type b=0;
+ for (size_type row=0; row<lines.size(); ++row)
{
- const unsigned int rowindex =
+ const size_type rowindex =
rowset.size()==0 ? row : rowset.nth_index_in_set(row);
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
- if (static_cast<unsigned int>(std::abs(static_cast<int>(rowindex-*j))) > b)
+ if (static_cast<size_type>(std::abs(static_cast<int>(rowindex-*j))) > b)
b = std::abs(static_cast<signed int>(rowindex-*j));
}
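The bandwidth loops here and in the hunks above compute |row - *j| by casting the unsigned difference through int, which narrows once size_type is 64 bits wide. A cast-free sketch that stays entirely in the unsigned domain (unsigned_distance is an illustrative helper, not deal.II API):

    #include <cstddef>

    typedef std::size_t size_type;

    inline size_type unsigned_distance (const size_type a, const size_type b)
    {
      return (a > b) ? a - b : b - a;   // |a - b| without any signed cast
    }
    // usage inside the loop: b = std::max (b, unsigned_distance (row, *j));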
-unsigned int
+CompressedSimpleSparsityPattern::size_type
CompressedSimpleSparsityPattern::n_nonzero_elements () const
{
- unsigned int n=0;
- for (unsigned int i=0; i<lines.size(); ++i)
+ size_type n=0;
+ for (size_type i=0; i<lines.size(); ++i)
{
n += lines[i].entries.size();
}
}
std::size_t
CompressedSimpleSparsityPattern::memory_consumption () const
{
//TODO: IndexSet...
std::size_t mem = sizeof(CompressedSimpleSparsityPattern);
- for (unsigned int i=0; i<lines.size(); ++i)
+ for (size_type i=0; i<lines.size(); ++i)
mem += MemoryConsumption::memory_consumption (lines[i]);
return mem;
// explicit instantiations
-template void CompressedSimpleSparsityPattern::Line::add_entries(types::global_dof_index *,
- types::global_dof_index *,
+template void CompressedSimpleSparsityPattern::Line::add_entries(size_type *,
+ size_type *,
const bool);
-template void CompressedSimpleSparsityPattern::Line::add_entries(const types::global_dof_index *,
- const types::global_dof_index *,
+template void CompressedSimpleSparsityPattern::Line::add_entries(const size_type *,
+ const size_type *,
const bool);
#ifndef DEAL_II_VECTOR_ITERATOR_IS_POINTER
template void CompressedSimpleSparsityPattern::Line::
-add_entries(std::vector<unsigned int>::iterator,
- std::vector<unsigned int>::iterator,
+add_entries(std::vector<size_type>::iterator,
+ std::vector<size_type>::iterator,
const bool);
#endif
-CompressedSparsityPattern::CompressedSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n)
+CompressedSparsityPattern::CompressedSparsityPattern (const size_type m,
+ const size_type n)
:
rows(0),
cols(0)
-CompressedSparsityPattern::CompressedSparsityPattern (const types::global_dof_index n)
+CompressedSparsityPattern::CompressedSparsityPattern (const size_type n)
:
rows(0),
cols(0)
void
-CompressedSparsityPattern::reinit (const types::global_dof_index m,
- const types::global_dof_index n)
+CompressedSparsityPattern::reinit (const size_type m,
+ const size_type n)
{
rows = m;
cols = n;
-unsigned int
+CompressedSparsityPattern::size_type
CompressedSparsityPattern::max_entries_per_row () const
{
- unsigned int m = 0;
- for (unsigned int i=0; i<rows; ++i)
+ size_type m = 0;
+ for (size_type i=0; i<rows; ++i)
{
if (lines[i].cache_entries != 0)
lines[i].flush_cache ();
- m = std::max (m, static_cast<unsigned int>(lines[i].entries.size()));
+ m = std::max (m, static_cast<size_type>(lines[i].entries.size()));
}
return m;
bool
-CompressedSparsityPattern::exists (const types::global_dof_index i,
- const types::global_dof_index j) const
+CompressedSparsityPattern::exists (const size_type i,
+ const size_type j) const
{
Assert (i<rows, ExcIndexRange(i, 0, rows));
Assert (j<cols, ExcIndexRange(j, 0, cols));
// 2. that the @p{add} function can
// be called on elements that
// already exist without any harm
- for (unsigned int row=0; row<rows; ++row)
+ for (size_type row=0; row<rows; ++row)
{
if (lines[row].cache_entries != 0)
lines[row].flush_cache ();
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end();
++j)
void
CompressedSparsityPattern::print (std::ostream &out) const
{
- for (unsigned int row=0; row<rows; ++row)
+ for (size_type row=0; row<rows; ++row)
{
if (lines[row].cache_entries != 0)
lines[row].flush_cache ();
out << '[' << row;
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
out << ',' << *j;
void
CompressedSparsityPattern::print_gnuplot (std::ostream &out) const
{
- for (unsigned int row=0; row<rows; ++row)
+ for (size_type row=0; row<rows; ++row)
{
if (lines[row].cache_entries != 0)
lines[row].flush_cache ();
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
// while matrix entries are usually
-unsigned int
+CompressedSparsityPattern::size_type
CompressedSparsityPattern::bandwidth () const
{
- unsigned int b=0;
- for (unsigned int row=0; row<rows; ++row)
+ size_type b=0;
+ for (size_type row=0; row<rows; ++row)
{
if (lines[row].cache_entries != 0)
lines[row].flush_cache ();
- for (std::vector<unsigned int>::const_iterator
+ for (std::vector<size_type>::const_iterator
j=lines[row].entries.begin();
j != lines[row].entries.end(); ++j)
- if (static_cast<unsigned int>(std::abs(static_cast<int>(row-*j))) > b)
+ if (static_cast<size_type>(std::abs(static_cast<int>(row-*j))) > b)
b = std::abs(static_cast<signed int>(row-*j));
}
-unsigned int
+CompressedSparsityPattern::size_type
CompressedSparsityPattern::n_nonzero_elements () const
{
- unsigned int n=0;
- for (unsigned int i=0; i<rows; ++i)
+ size_type n=0;
+ for (size_type i=0; i<rows; ++i)
{
if (lines[i].cache_entries != 0)
lines[i].flush_cache ();
// explicit instantiations
-template void CompressedSparsityPattern::Line::add_entries(types::global_dof_index *,
- types::global_dof_index *,
+template void CompressedSparsityPattern::Line::add_entries(size_type *,
+ size_type *,
const bool);
-template void CompressedSparsityPattern::Line::add_entries(const types::global_dof_index *,
- const types::global_dof_index *,
+template void CompressedSparsityPattern::Line::add_entries(const size_type *,
+ const size_type *,
const bool);
#ifndef DEAL_II_VECTOR_ITERATOR_IS_POINTER
template void CompressedSparsityPattern::Line::
-add_entries(std::vector<unsigned int>::iterator,
- std::vector<unsigned int>::iterator,
+add_entries(std::vector<size_type>::iterator,
+ std::vector<size_type>::iterator,
const bool);
#endif
bool
-ConstraintMatrix::check_zero_weight (const std::pair<unsigned int, double> &p)
+ConstraintMatrix::check_zero_weight (const std::pair<size_type, double> &p)
{
return (p.second == 0);
}
std::size_t
ConstraintMatrix::ConstraintLine::memory_consumption () const
{
return (MemoryConsumption::memory_consumption (line) +
void
-ConstraintMatrix::add_lines (const std::set<unsigned int> &lines)
+ConstraintMatrix::add_lines (const std::set<size_type> &lines)
{
- for (std::set<unsigned int>::const_iterator
+ for (std::set<size_type>::const_iterator
i = lines.begin(); i != lines.end(); ++i)
add_line (*i);
}
void
ConstraintMatrix::add_lines (const std::vector<bool> &lines)
{
- for (unsigned int i=0; i<lines.size(); ++i)
+ for (size_type i=0; i<lines.size(); ++i)
if (lines[i] == true)
add_line (i);
}
void
ConstraintMatrix::add_lines (const IndexSet &lines)
{
- for (unsigned int i=0; i<lines.n_elements(); ++i)
+ for (size_type i=0; i<lines.n_elements(); ++i)
add_line (lines.nth_index_in_set(i));
}
void
ConstraintMatrix::add_entries
-(const unsigned int line,
- const std::vector<std::pair<unsigned int,double> > &col_val_pairs)
+(const size_type line,
+ const std::vector<std::pair<size_type,double> > &col_val_pairs)
{
Assert (sorted==false, ExcMatrixIsClosed());
Assert (is_constrained(line), ExcLineInexistant(line));
// an entry for this column already
// exists, since we don't want to
// enter it twice
- for (std::vector<std::pair<unsigned int,double> >::const_iterator
+ for (std::vector<std::pair<size_type,double> >::const_iterator
col_val_pair = col_val_pairs.begin();
col_val_pair!=col_val_pairs.end(); ++col_val_pair)
{
line!=constraints.lines.end(); ++line)
if (filter.is_element(line->line))
{
- const unsigned int row = filter.index_within_set (line->line);
+ const size_type row = filter.index_within_set (line->line);
add_line (row);
set_inhomogeneity (row, line->inhomogeneity);
- for (unsigned int i=0; i<line->entries.size(); ++i)
+ for (size_type i=0; i<line->entries.size(); ++i)
if (filter.is_element(line->entries[i].first))
add_entry (row, filter.index_within_set (line->entries[i].first),
line->entries[i].second);
// modify the size any more after this
// point.
{
- std::vector<unsigned int> new_lines (lines_cache.size(),
- numbers::invalid_unsigned_int);
- unsigned int counter = 0;
+ std::vector<size_type> new_lines (lines_cache.size(),
+ numbers::invalid_size_type);
+ size_type counter = 0;
for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
line!=lines.end(); ++line, ++counter)
new_lines[calculate_line_index(line->line)] = counter;
// in debug mode: check whether we really
// set the pointers correctly.
- for (unsigned int i=0; i<lines_cache.size(); ++i)
- if (lines_cache[i] != numbers::invalid_unsigned_int)
+ for (size_type i=0; i<lines_cache.size(); ++i)
+ if (lines_cache[i] != numbers::invalid_size_type)
Assert (i == calculate_line_index(lines[lines_cache[i]].line),
ExcInternalError());
// Let us figure out the largest dof index. This is an upper bound for the
// number of constraints because it is an approximation for the number of dofs
// in our system.
- unsigned int largest_idx = 0;
+ size_type largest_idx = 0;
for (std::vector<ConstraintLine>::iterator line = lines.begin();
line!=lines.end(); ++line)
{
// efficient. also, we have to do
// it only once, rather than in
// each iteration
- unsigned int iteration = 0;
+ size_type iteration = 0;
while (true)
{
bool chained_constraint_replaced = false;
#ifdef DEBUG
// we need to keep track of how many replacements we do in this line, because we can
// end up in a cycle A->B->C->A without the number of entries growing.
- unsigned int n_replacements = 0;
+ size_type n_replacements = 0;
#endif
// elements that we don't
// store on the current
// processor
- unsigned int entry = 0;
+ size_type entry = 0;
while (entry < line->entries.size())
if (((local_lines.size() == 0)
||
// look up the chain
// of constraints for
// this entry
- const unsigned int dof_index = line->entries[entry].first;
- const double weight = line->entries[entry].second;
+ const size_type dof_index = line->entries[entry].first;
+ const double weight = line->entries[entry].second;
Assert (dof_index != line->line,
ExcMessage ("Cycle in constraints detected!"));
// of other dofs:
if (constrained_line->entries.size() > 0)
{
- for (unsigned int i=0; i<constrained_line->entries.size(); ++i)
+ for (size_type i=0; i<constrained_line->entries.size(); ++i)
Assert (dof_index != constrained_line->entries[i].first,
ExcMessage ("Cycle in constraints detected!"));
constrained_line->entries[0].second *
weight);
- for (unsigned int i=1; i<constrained_line->entries.size(); ++i)
+ for (size_type i=1; i<constrained_line->entries.size(); ++i)
line->entries
.push_back (std::make_pair (constrained_line->entries[i].first,
constrained_line->entries[i].second *
// lets us allocate the correct amount
// of memory for the constraint
// entries.
- unsigned int duplicates = 0;
- for (unsigned int i=1; i<line->entries.size(); ++i)
+ size_type duplicates = 0;
+ for (size_type i=1; i<line->entries.size(); ++i)
if (line->entries[i].first == line->entries[i-1].first)
duplicates++;
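// Worked example (assuming, as the merge step below suggests, that the
// weights of duplicate columns are accumulated): the sorted entry list
// (1,0.5) (1,0.25) (4,0.25) has one adjacent duplicate column, so
// duplicates == 1 and the merged list becomes (1,0.75) (4,0.25).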
// resolve the duplicates
new_entries.reserve (line->entries.size() - duplicates);
new_entries.push_back(line->entries[0]);
- for (unsigned int j=1; j<line->entries.size(); ++j)
+ for (size_type j=1; j<line->entries.size(); ++j)
if (line->entries[j].first == line->entries[j-1].first)
{
Assert (new_entries.back().first == line->entries[j].first,
// really no duplicates
// left and that the list
// is still sorted
- for (unsigned int j=1; j<new_entries.size(); ++j)
+ for (size_type j=1; j<new_entries.size(); ++j)
{
Assert (new_entries[j].first != new_entries[j-1].first,
ExcInternalError());
// weights are also subject
// to round-off
double sum = 0;
- for (unsigned int i=0; i<line->entries.size(); ++i)
+ for (size_type i=0; i<line->entries.size(); ++i)
sum += line->entries[i].second;
if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
{
- for (unsigned int i=0; i<line->entries.size(); ++i)
+ for (size_type i=0; i<line->entries.size(); ++i)
line->entries[i].second /= sum;
line->inhomogeneity /= sum;
}
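// Worked example: weights 0.25, 0.25 and 0.49999999999995 sum to
// 0.99999999999995; since |sum-1| = 5e-14 < 1e-13, all weights and the
// inhomogeneity are divided by the sum, restoring a sum of 1 up to
// round-off.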
if (other_constraints.lines_cache.size() > lines_cache.size())
lines_cache.resize(other_constraints.lines_cache.size(),
- numbers::invalid_unsigned_int);
+ numbers::invalid_size_type);
// first action is to fold into the present
// object possible constraints in the
line!=lines.end(); ++line)
{
tmp.clear ();
- for (unsigned int i=0; i<line->entries.size(); ++i)
+ for (size_type i=0; i<line->entries.size(); ++i)
{
// if the present dof is not
// constrained, or if we won't take
for (ConstraintLine::Entries::const_iterator j=other_line->begin();
j!=other_line->end(); ++j)
- tmp.push_back (std::pair<unsigned int,double>(j->first,
+ tmp.push_back (std::pair<size_type,double>(j->first,
j->second*weight));
line->inhomogeneity += other_constraints.get_inhomogeneity(line->entries[i].first) *
}
// update the lines cache
- unsigned int counter = 0;
+ size_type counter = 0;
for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
line!=lines.end(); ++line, ++counter)
lines_cache[calculate_line_index(line->line)] = counter;
-void ConstraintMatrix::shift (const unsigned int offset)
+void ConstraintMatrix::shift (const size_type offset)
{
//TODO: this doesn't work with IndexSets yet. [TH]
AssertThrow(local_lines.size()==0, ExcNotImplemented());
lines_cache.insert (lines_cache.begin(), offset,
- numbers::invalid_unsigned_int);
+ numbers::invalid_size_type);
for (std::vector<ConstraintLine>::iterator i = lines.begin();
i != lines.end(); ++i)
}
{
- std::vector<unsigned int> tmp;
+ std::vector<size_type> tmp;
lines_cache.swap (tmp);
}
new_line.reserve (uncondensed.n_rows());
std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- unsigned int shift = 0;
- unsigned int n_rows = uncondensed.n_rows();
+ size_type shift = 0;
+ size_type n_rows = uncondensed.n_rows();
if (next_constraint == lines.end())
// if no constraint is to be handled
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
new_line.push_back (row);
else
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (size_type row=0; row!=n_rows; ++row)
if (row == next_constraint->line)
{
// this line is constrained
// nothing more to do; finish rest
// of loop
{
- for (unsigned int i=row+1; i<n_rows; ++i)
+ for (size_type i=row+1; i<n_rows; ++i)
new_line.push_back (i-shift);
break;
};
// note: in this loop we need not check whether @p{next_constraint} is a
// valid iterator, since it is dereferenced only as many times as there are
// entries in new_line[*] that refer to an existing constraint
- for (unsigned int row=0; row<uncondensed.n_rows(); ++row)
+ for (size_type row=0; row<uncondensed.n_rows(); ++row)
if (new_line[row] != -1)
// line not constrained copy entries if column will not be condensed
// away, distribute otherwise
while (c->line != j->column())
++c;
- for (unsigned int q=0; q!=c->entries.size(); ++q)
+ for (size_type q=0; q!=c->entries.size(); ++q)
condensed.add (new_line[row], new_line[c->entries[q].first]);
}
else
// for each entry: distribute
if (new_line[j->column()] != -1)
// column is not constrained
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed.add (new_line[next_constraint->entries[q].first],
new_line[j->column()]);
std::vector<ConstraintLine>::const_iterator c = lines.begin();
while (c->line != j->column()) ++c;
- for (unsigned int p=0; p!=c->entries.size(); ++p)
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (size_type p=0; p!=c->entries.size(); ++p)
+ for (size_type q=0; q!=next_constraint->entries.size(); ++q)
condensed.add (new_line[next_constraint->entries[q].first],
new_line[c->entries[p].first]);
};
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
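// Sketch of the marker array for a hypothetical case with two constrained
// rows, lines[0].line == 1 and lines[1].line == 4:
//
//   row         :  0    1    2    3    4
//   distribute[]: inv   0   inv  inv   1    (inv = numbers::invalid_size_type)
//
// so a single lookup both tells whether a row is constrained and, if so,
// yields the index of its ConstraintLine.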
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
{
// regular line. loop over all valid cols. note that this
// changes the line we are presently working on: we add additional
// the last old entry and stop work there, but since operating on
// the newly added ones only takes two comparisons (column index
// valid, distribute[column] necessarily
- // ==numbers::invalid_unsigned_int), it is cheaper to not do so and
+ // ==numbers::invalid_size_type), it is cheaper to not do so and
// run right until the end of the line
for (SparsityPattern::iterator entry = sparsity.begin(row);
((entry != sparsity.end(row)) &&
entry->is_valid_entry());
++entry)
{
- const unsigned int column = entry->column();
+ const size_type column = entry->column();
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
// distribute entry
// at regular row
// @p{row} and
// irregular column
// sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
sparsity.add (row,
for (SparsityPattern::iterator entry = sparsity.begin(row);
(entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry)
{
- const unsigned int column = entry->column();
- if (distribute[column] == numbers::invalid_unsigned_int)
+ const size_type column = entry->column();
+ if (distribute[column] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
// distribute entry at irregular
// row @p{row} and irregular column
// sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[column]].entries[q].first);
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// cols. note that as we
// proceed to distribute
// cols, the loop may get
// longer
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
// distribute entry
// at regular row
// before by tracking
// the length of this
// row
- unsigned int old_rowlength = sparsity.row_length(row);
- for (unsigned int q=0;
+ size_type old_rowlength = sparsity.row_length(row);
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
{
- const unsigned int
+ const size_type
new_col = lines[distribute[column]].entries[q].first;
sparsity.add (row, new_col);
- const unsigned int new_rowlength = sparsity.row_length(row);
+ const size_type new_rowlength = sparsity.row_length(row);
if ((new_col < column) && (old_rowlength != new_rowlength))
++j;
old_rowlength = new_rowlength;
}
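// (rationale for the ++j above: if new_col was really inserted in front
// of position j, the row grew and every later column shifted back by one
// slot, so j must be advanced to stay on the column currently being
// inspected; if new_col was already present, the row length is unchanged
// and j stays put)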
else
// row must be distributed
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
// distribute entry at irregular
// row @p{row} and irregular column
// sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[sparsity.column_number(row,j)]]
.entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
{
// regular line. loop over
// cols. note that as we proceed to
for (; col_num != sparsity.row_end (row); ++col_num)
{
- const unsigned int column = *col_num;
+ const size_type column = *col_num;
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
// row
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
{
- const unsigned int
+ const size_type
new_col = lines[distribute[column]].entries[q].first;
sparsity.add (row, new_col);
for (; col_num != sparsity.row_end (row); ++col_num)
{
- const unsigned int column = *col_num;
+ const size_type column = *col_num;
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
// distribute entry at irregular
// row @p{row} and irregular column
// sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[column]]
.entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// cols. note that as we
// proceed to distribute
// cols, the loop may get
// longer
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
// distribute entry
// at regular row
// before by tracking
// the length of this
// row
- unsigned int old_rowlength = sparsity.row_length(row);
- for (unsigned int q=0;
+ size_type old_rowlength = sparsity.row_length(row);
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
{
- const unsigned int
+ const size_type
new_col = lines[distribute[column]].entries[q].first;
sparsity.add (row, new_col);
- const unsigned int new_rowlength = sparsity.row_length(row);
+ const size_type new_rowlength = sparsity.row_length(row);
if ((new_col < column) && (old_rowlength != new_rowlength))
++j;
old_rowlength = new_rowlength;
}
else
// row must be distributed
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
// distribute entry at irregular
// row @p{row} and irregular column
// sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[sparsity.column_number(row,j)]]
.entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
// get index of this row
// within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
+ const size_type block_row = block_index.first;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// all columns and see
// whether this column must
// this blockrow and the
// corresponding row
// therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const SparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
entry->is_valid_entry();
++entry)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col, entry->column());
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
// distribute entry at regular
// row @p{row} and irregular column
// global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (row,
lines[distribute[global_col]].entries[q].first);
// whole row into the
// chunks defined by the
// blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const SparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
entry->is_valid_entry();
++entry)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col, entry->column());
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first, global_col);
}
else
// row @p{row} and irregular column
// @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
}
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
+ distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
// get index of this row
// within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
- const unsigned int local_row = block_index.second;
+ const size_type block_row = block_index.first;
+ const size_type local_row = block_index.second;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// all columns and see
// whether this column must
// this blockrow and the
// corresponding row
// therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
- for (unsigned int j=0; j<block_sparsity.row_length(local_row); ++j)
+ for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
// distribute entry at regular
// row @p{row} and irregular column
// global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]]
.entries.size(); ++q)
sparsity.add (row,
// whole row into the
// chunks defined by the
// blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
- for (unsigned int j=0; j<block_sparsity.row_length(local_row); ++j)
+ for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
// row @p{row} and irregular column
// @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
};
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
+ distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
// get index of this row
// within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
- const unsigned int local_row = block_index.second;
+ const size_type block_row = block_index.first;
+ const size_type local_row = block_index.second;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// all columns and see
// whether this column must
// this blockrow and the
// corresponding row
// therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSetSparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
j = block_sparsity.row_begin(local_row);
j != block_sparsity.row_end(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col, *j);
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
// distribute entry at regular
// row @p{row} and irregular column
// global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]]
.entries.size(); ++q)
sparsity.add (row,
// whole row into the
// chunks defined by the
// blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSetSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
j = block_sparsity.row_begin(local_row);
j != block_sparsity.row_end(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col, *j);
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
// row @p{row} and irregular column
// @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
};
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
// store for each index whether it must be
// distributed or not. If entry is
- // numbers::invalid_unsigned_int,
+ // numbers::invalid_size_type,
// no distribution is necessary.
// otherwise, the number states which line
// in the constraint matrix handles this
// index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
+ distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
// get index of this row
// within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
- const unsigned int local_row = block_index.second;
+ const size_type block_row = block_index.first;
+ const size_type local_row = block_index.second;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
// regular line. loop over
// all columns and see
// whether this column must
// this blockrow and the
// corresponding row
// therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSimpleSparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
- for (unsigned int j=0; j<block_sparsity.row_length(local_row); ++j)
+ for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
// distribute entry at regular
// row @p{row} and irregular column
// global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]]
.entries.size(); ++q)
sparsity.add (row,
// whole row into the
// chunks defined by the
// blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSimpleSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
- for (unsigned int j=0; j<block_sparsity.row_length(local_row); ++j)
+ for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
// row @p{row} and irregular column
// @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
};
// constraints indicate.
IndexSet my_indices (vec.size());
{
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
local_range = vec.local_range();
my_indices.add_range (local_range.first, local_range.second);
- std::set<unsigned int> individual_indices;
+ std::set<size_type> individual_indices;
for (constraint_iterator it = begin_my_constraints;
it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
if ((it->entries[i].first < local_range.first)
||
(it->entries[i].first >= local_range.second))
// next_constraint.line by adding the
// different contributions
double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
new_value += (vec_distribute(it->entries[i].first) *
it->entries[i].second);
vec(it->line) = new_value;
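// in formula form: vec(line) = b_line + sum_k w_k * vec(col_k), where
// (col_k, w_k) are the entries of the constraint line and b_line its
// inhomogeneity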
Assert (sorted==true, ExcMatrixIsClosed());
IndexSet my_indices (vec.size());
- for (unsigned int block=0; block<vec.n_blocks(); ++block)
+ for (size_type block=0; block<vec.n_blocks(); ++block)
{
typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
ConstraintLine index_comparison;
// constraints indicate. No caching done
// yet. would need some more clever data
// structures for doing that.
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
local_range = vec.block(block).local_range();
my_indices.add_range (local_range.first, local_range.second);
- std::set<unsigned int> individual_indices;
+ std::set<size_type> individual_indices;
for (constraint_iterator it = begin_my_constraints;
it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
if ((it->entries[i].first < local_range.first)
||
(it->entries[i].first >= local_range.second))
// here we import the data
vec_distribute.reinit(vec,true);
- for (unsigned int block=0; block<vec.n_blocks(); ++block)
+ for (size_type block=0; block<vec.n_blocks(); ++block)
{
typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
ConstraintLine index_comparison;
// next_constraint.line by adding the
// different contributions
double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
new_value += (vec_distribute(it->entries[i].first) *
it->entries[i].second);
vec(it->line) = new_value;
// all indices we need to read from
IndexSet my_indices (vec.size());
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
local_range = vec.local_range();
my_indices.add_range (local_range.first, local_range.second);
- std::set<unsigned int> individual_indices;
+ std::set<size_type> individual_indices;
for (constraint_iterator it = begin_my_constraints;
it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
if ((it->entries[i].first < local_range.first)
||
(it->entries[i].first >= local_range.second))
// next_constraint.line by adding the
// different contributions
PetscScalar new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
it->entries[i].second);
vec(it->line) = new_value;
-bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
+bool ConstraintMatrix::is_identity_constrained (const size_type index) const
{
if (is_constrained(index) == false)
return false;
-unsigned int ConstraintMatrix::max_constraint_indirections () const
+ConstraintMatrix::size_type ConstraintMatrix::max_constraint_indirections () const
{
- unsigned int return_value = 0;
+ size_type return_value = 0;
for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
i!=lines.end(); ++i)
- // use static cast, since
- // typeof(size)==std::size_t, which is !=
- // unsigned int on AIX
+ // with size_type defined as std::size_t,
+ // the static_cast below is a no-op; keep
+ // it in case the typedef ever changes
return_value = std::max(return_value,
- static_cast<unsigned int>(i->entries.size()));
+ static_cast<size_type>(i->entries.size()));
return return_value;
}
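// Example (hypothetical constraints): with x_0 = 0.5*x_1 and
// x_2 = 0.3*x_3 + 0.7*x_4, the longest entry list has length 2, so the
// function returns 2.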
void ConstraintMatrix::print (std::ostream &out) const
{
- for (unsigned int i=0; i!=lines.size(); ++i)
+ for (size_type i=0; i!=lines.size(); ++i)
{
// output the list of
// constraints as pairs of dofs
// and their weights
if (lines[i].entries.size() > 0)
{
- for (unsigned int j=0; j<lines[i].entries.size(); ++j)
+ for (size_type j=0; j<lines[i].entries.size(); ++j)
out << " " << lines[i].line
<< " " << lines[i].entries[j].first
<< ": " << lines[i].entries[j].second << "\n";
{
out << "digraph constraints {"
<< std::endl;
- for (unsigned int i=0; i!=lines.size(); ++i)
+ for (size_type i=0; i!=lines.size(); ++i)
{
// same concept as in the
// previous function
if (lines[i].entries.size() > 0)
- for (unsigned int j=0; j<lines[i].entries.size(); ++j)
+ for (size_type j=0; j<lines[i].entries.size(); ++j)
out << " " << lines[i].line << "->" << lines[i].entries[j].first
<< "; // weight: "
<< lines[i].entries[j].second
std::size_t
ConstraintMatrix::memory_consumption () const
{
return (MemoryConsumption::memory_consumption (lines) +
VectorType &condensed_vector) const; \
template void ConstraintMatrix:: \
distribute_local_to_global<VectorType > (const Vector<double> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
VectorType &, \
const FullMatrix<double> &) const; \
template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
#define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
template void ConstraintMatrix:: \
distribute_local_to_global<VectorType > (const Vector<double> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
VectorType &, \
const FullMatrix<double> &) const
template void ConstraintMatrix:: \
distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<double> &, \
const Vector<double> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
MatrixType &, \
VectorType &, \
bool , \
template void ConstraintMatrix:: \
distribute_local_to_global<MatrixType,Vector<double> > (const FullMatrix<double> &, \
const Vector<double> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
MatrixType &, \
Vector<double> &, \
bool , \
template void ConstraintMatrix:: \
distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<double> &, \
const Vector<double> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
MatrixType &, \
VectorType &, \
bool , \
template void ConstraintMatrix:: \
distribute_local_to_global<MatrixType,Vector<double> > (const FullMatrix<double> &, \
const Vector<double> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
MatrixType &, \
Vector<double> &, \
bool , \
#define SPARSITY_FUNCTIONS(SparsityType) \
template void ConstraintMatrix::add_entries_local_to_global<SparsityType> (\
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
SparsityType &, \
const bool, \
const Table<2,bool> &, \
internal::bool2type<false>) const; \
template void ConstraintMatrix::add_entries_local_to_global<SparsityType> (\
- const std::vector<unsigned int> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
+ const std::vector<std::size_t> &, \
SparsityType &, \
const bool, \
const Table<2,bool> &) const
#define BLOCK_SPARSITY_FUNCTIONS(SparsityType) \
template void ConstraintMatrix::add_entries_local_to_global<SparsityType> (\
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
SparsityType &, \
const bool, \
const Table<2,bool> &, \
internal::bool2type<true>) const; \
template void ConstraintMatrix::add_entries_local_to_global<SparsityType> (\
- const std::vector<unsigned int> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
+ const std::vector<std::size_t> &, \
SparsityType &, \
const bool, \
const Table<2,bool> &) const
#define ONLY_MATRIX_FUNCTIONS(MatrixType) \
template void ConstraintMatrix::distribute_local_to_global<MatrixType > (\
const FullMatrix<double> &, \
- const std::vector<unsigned int> &, \
- const std::vector<unsigned int> &, \
+ const std::vector<std::size_t> &, \
+ const std::vector<std::size_t> &, \
MatrixType &) const
ONLY_MATRIX_FUNCTIONS(SparseMatrix<float>);