* <code>cell-@>active_fe_index</code>
* as last argument.
*/
- void get_dof_indices (std::vector<unsigned int> &dof_indices,
+ void get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
const unsigned int fe_index = DH::default_fe_index) const;
- void get_mg_dof_indices (const int level, std::vector<unsigned int> &dof_indices, const unsigned int fe_index = DH::default_fe_index) const;
+ /**
+ * Return the indices of the dofs of this
+ * object in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, ...
+ * dofs on line 0, dofs on line 1, ...,
+ * then quads, then hexes.
+ *
+ * It is assumed that the vector already
+ * has the right size beforehand. The
+ * indices refer to the local numbering
+ * for the level this line lives on.
+ */
- void set_mg_dof_indices (const int level, const std::vector<unsigned int> &dof_indices, const unsigned int fe_index = DH::default_fe_index);
+ void get_mg_dof_indices (const int level, std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index = DH::default_fe_index) const;
+
+ /**
+ * Sets the level DoF indices that are returned by get_mg_dof_indices.
+ */
+ void set_mg_dof_indices (const int level, const std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index = DH::default_fe_index);
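A minimal usage sketch of the two level-index calls above, not part of this patch: it assumes a DoFHandler<dim> named dof_handler whose level DoFs have been distributed and a matching finite element fe (all names illustrative).

    std::vector<types::global_dof_index> level_dofs (fe.dofs_per_cell);
    for (typename DoFHandler<dim>::level_cell_iterator cell = dof_handler.begin_mg();
         cell != dof_handler.end_mg(); ++cell)
      {
        // read the level DoF indices of this cell in the standard ordering
        cell->get_mg_dof_indices (cell->level(), level_dofs);
        // ... permute/renumber level_dofs as needed (cf. DoFRenumbering) ...
        cell->set_mg_dof_indices (cell->level(), level_dofs);
      }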
/**
* Global DoF index of the <i>i</i>
* match the result of
* active_fe_index().
*/
- unsigned int vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
+ types::global_dof_index vertex_dof_index (const unsigned int vertex,
+ const unsigned int i,
+ const unsigned int fe_index = DH::default_fe_index) const;
- unsigned int mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index = DH::default_fe_index) const;
+ /**
+ * Returns the global DoF index of the <i>i</i>th degree of freedom
+ * associated with the @p vertex-th vertex on level @p level. Also see
+ * vertex_dof_index().
+ */
+ types::global_dof_index mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index = DH::default_fe_index) const;
/**
* Index of the <i>i</i>th degree
* degrees are defined in the interior of
* the face.
*/
- unsigned int dof_index (const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
+ types::global_dof_index dof_index (const unsigned int i,
+ const unsigned int fe_index = DH::default_fe_index) const;
- unsigned int mg_dof_index (const int level, const unsigned int i) const;
+ /**
+ * Returns the dof_index on the given level. Also see dof_index().
+ */
+ types::global_dof_index mg_dof_index (const int level, const unsigned int i) const;
/**
* @}
*
* Examples for this use are in the implementation of DoFRenumbering.
*/
- void dof_indices (std::vector<types::global_dof_index> &dof_indices) const;
- void get_active_or_mg_dof_indices (std::vector<unsigned int> &dof_indices) const;
++ void get_active_or_mg_dof_indices (std::vector<types::global_dof_index> &dof_indices) const;
/**
- * @deprecated Use dof_indices() instead.
- *
* Return the indices of the dofs of this
* quad in the standard ordering: dofs
* on vertex 0, dofs on vertex 1, etc,
*
* This is a function which requires that the cell be active.
*
+ * Also see get_active_or_mg_dof_indices().
+ *
* @deprecated Currently, this function can also be called for non-active cells, if all degrees of freedom of the FiniteElement are located in vertices. This functionality will vanish in a future release.
*/
- void get_dof_indices (std::vector<unsigned int> &dof_indices) const;
+ void get_dof_indices (std::vector<types::global_dof_index> &dof_indices) const;
/**
- * @deprecated Use dof_indices() with level_cell_iterator returned from begin_mg().
+ * @deprecated Use get_active_or_mg_dof_indices() with level_cell_iterator returned from begin_mg().
*
* Retrieve the global indices of the degrees of freedom on this cell in the level vector associated to the level of the cell.
*/
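A minimal sketch of the recommended replacement, assuming a DoFHandler<dim> named dof_handler and a finite element fe (illustrative, not part of this patch): on a level_cell_iterator obtained from begin_mg() the call below returns the level DoF indices, while on an active cell iterator it returns the usual active indices.

    std::vector<types::global_dof_index> dof_indices (fe.dofs_per_cell);
    for (typename DoFHandler<dim>::level_cell_iterator cell = dof_handler.begin_mg();
         cell != dof_handler.end_mg(); ++cell)
      cell->get_active_or_mg_dof_indices (dof_indices);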
* cache, if one exists for the
* given DoF handler class.
*/
- void set_dof_indices (const std::vector<unsigned int> &dof_indices);
+ void set_dof_indices (const std::vector<types::global_dof_index> &dof_indices);
+
+ /**
+ * Set the level DoF indices of this
+ * cell to the given values.
+ */
- void set_mg_dof_indices (const std::vector<unsigned int> &dof_indices);
+ void set_mg_dof_indices (const std::vector<types::global_dof_index> &dof_indices);
/**
* Update the cache in which we
template<class DH, bool lda>
inline
- void DoFCellAccessor<DH,lda>::dof_indices (std::vector<types::global_dof_index> &dof_indices) const
-void DoFCellAccessor<DH,lda>::get_active_or_mg_dof_indices (std::vector<unsigned int> &dof_indices) const
++void DoFCellAccessor<DH,lda>::get_active_or_mg_dof_indices (std::vector<types::global_dof_index> &dof_indices) const
{
if (lda)
get_mg_dof_indices (dof_indices);
ChunkSparsityPattern::ChunkSparsityPattern (
- const unsigned int m,
- const unsigned int n,
+ const types::global_dof_index m,
+ const types::global_dof_index n,
const std::vector<unsigned int> &row_lengths,
const unsigned int chunk_size,
- const bool optimize_diag)
+ const bool)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
ChunkSparsityPattern::ChunkSparsityPattern (
- const unsigned int m,
+ const types::global_dof_index m,
const std::vector<unsigned int> &row_lengths,
const unsigned int chunk_size,
- const bool optimize_diag)
+ const bool)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
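For illustration, a minimal sketch of the constructors whose signatures change above (sizes, row lengths, and chunk size are made up; the trailing flag, formerly optimize_diag, is still accepted but now ignored):

    const types::global_dof_index n_rows = 100, n_cols = 100;
    std::vector<unsigned int> row_lengths (n_rows, 5u);
    ChunkSparsityPattern chunk_sparsity (n_rows, n_cols, row_lengths,
                                         /*chunk_size=*/ 4, /*ignored*/ true);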
- void copy_row (const Sparsity &csp,
- const unsigned int row,
- std::vector<int> &row_indices)
+ namespace internal
+ {
+ namespace
+ {
+ // distinguish between compressed sparsity types that define row_begin()
+ // and SparsityPattern that uses begin() as iterator type
+ template <typename Sparsity>
- for (unsigned int col=0; col_num != csp.row_end (row); ++col_num, ++col)
++ void copy_row (const Sparsity &csp,
++ const size_type row,
++ std::vector<int_type> &row_indices)
+ {
+ typename Sparsity::row_iterator col_num = csp.row_begin (row);
- const unsigned int row,
- std::vector<int> &row_indices)
++ for (size_type col=0; col_num != csp.row_end (row); ++col_num, ++col)
+ row_indices[col] = *col_num;
+ }
+
+ void copy_row (const dealii::SparsityPattern &csp,
- for (unsigned int col=0; col_num != csp.end (row); ++col_num, ++col)
++ const size_type row,
++ std::vector<int_type> &row_indices)
+ {
+ dealii::SparsityPattern::iterator col_num = csp.begin (row);
++ for (size_type col=0; col_num != csp.end (row); ++col_num, ++col)
+ row_indices[col] = col_num->column();
+ }
+ }
+ }
+
+
+
template <typename SparsityType>
void
SparseMatrix::reinit (const Epetra_Map &input_row_map,
column_space_map.reset (new Epetra_Map (input_col_map));
- const unsigned int first_row = input_row_map.MinMyGID(),
+ const size_type first_row = input_row_map.MinMyGID(),
last_row = input_row_map.MaxMyGID()+1;
- std::vector<int> n_entries_per_row(last_row-first_row);
+ std::vector<int_type> n_entries_per_row(last_row-first_row);
- for (unsigned int row=first_row; row<last_row; ++row)
+ for (size_type row=first_row; row<last_row; ++row)
n_entries_per_row[row-first_row] = sparsity_pattern.row_length(row);
- // The deal.II notation of a Sparsity
- // pattern corresponds to the Epetra
- // concept of a Graph. Hence, we generate
- // a graph by copying the sparsity pattern
- // into it, and then build up the matrix
- // from the graph. This is considerable
- // faster than directly filling elements
- // into the matrix. Moreover, it consumes
- // less memory, since the internal
- // reordering is done on ints only, and we
- // can leave the doubles aside.
-
- // for more than one processor, need to
- // specify only row map first and let the
- // matrix entries decide about the column
- // map (which says which columns are
- // present in the matrix, not to be
- // confused with the col_map that tells
- // how the domain dofs of the matrix will
- // be distributed). for only one
- // processor, we can directly assign the
- // columns as well. Compare this with bug
- // # 4123 in the Sandia Bugzilla.
+ // The deal.II notation of a Sparsity pattern corresponds to the Epetra
+ // concept of a Graph. Hence, we generate a graph by copying the sparsity
+ // pattern into it, and then build up the matrix from the graph. This is
+ // considerably faster than directly filling elements into the
+ // matrix. Moreover, it consumes less memory, since the internal
+ // reordering is done on ints only, and we can leave the doubles aside.
+
+ // for more than one processor, need to specify only row map first and let
+ // the matrix entries decide about the column map (which says which
+ // columns are present in the matrix, not to be confused with the col_map
+ // that tells how the domain dofs of the matrix will be distributed). for
+ // only one processor, we can directly assign the columns as well. Compare
+ // this with bug # 4123 in the Sandia Bugzilla.
std_cxx1x::shared_ptr<Epetra_CrsGraph> graph;
if (input_row_map.Comm().NumProc() > 1)
graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
graph->FillComplete(input_col_map, input_row_map);
graph->OptimizeStorage();
- // check whether we got the number of
- // columns right.
+ // check whether we got the number of columns right.
AssertDimension (sparsity_pattern.n_cols(),
- static_cast<unsigned int>(graph->NumGlobalCols()));
+ static_cast<size_type>(graph->NumGlobalCols()));
// And now finally generate the matrix.
matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
// Only do this on the rows owned
// locally on this processor.
- int_map local_row = matrix->LRID(static_cast<int_map>(row));
- int local_row = matrix->LRID(static_cast<int>(row));
++ int_type local_row = matrix->LRID(static_cast<int_type>(row));
if (local_row >= 0)
{
TrilinosScalar *values;
- int_map *col_indices;
- int_map num_entries;
- int *col_indices;
- int num_entries;
++ int_type *col_indices;
++ int_type num_entries;
const int ierr = matrix->ExtractMyRowView(local_row, num_entries,
values, col_indices);
Assert (ierr == 0,
ExcTrilinosError(ierr));
- int_map *diag_find = std::find(col_indices,col_indices+num_entries,
- int *diag_find = std::find(col_indices,col_indices+num_entries,
++ int_type *diag_find = std::find(col_indices,col_indices+num_entries,
local_row);
- int_map diag_index = (int_map)(diag_find - col_indices);
- int diag_index = (int)(diag_find - col_indices);
++ int_type diag_index = (int_type)(diag_find - col_indices);
- for (int_map j=0; j<num_entries; ++j)
- for (int j=0; j<num_entries; ++j)
++ for (int_type j=0; j<num_entries; ++j)
if (diag_index != j || new_diag_value == 0)
values[j] = 0.;
--
++
// explicit instantiations
//
template void
// get a representation of the present
// row
-- int ncols;
-- int colnums = sparsity_pattern->n_cols();
++ int_type ncols;
++ int_type colnums = sparsity_pattern->n_cols();
int ierr;
- ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((int)this->a_row,
+ ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((int_type)this->a_row,
colnums,
ncols,
- (int *)&(*colnum_cache)[0]);
+ (int_type *)&(*colnum_cache)[0]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
// copy it into our caches if the
- const unsigned int row,
- std::vector<int> &row_indices)
+ namespace internal
+ {
+ namespace
+ {
+ // distinguish between compressed sparsity types that define row_begin()
+ // and SparsityPattern that uses begin() as iterator type
+ template <typename Sparsity>
+ void copy_row (const Sparsity &csp,
- for (unsigned int col=0; col_num != csp.row_end (row); ++col_num, ++col)
++ const size_type row,
++ std::vector<int_type> &row_indices)
+ {
+ typename Sparsity::row_iterator col_num = csp.row_begin (row);
- const unsigned int row,
- std::vector<int> &row_indices)
++ for (size_type col=0; col_num != csp.row_end (row); ++col_num, ++col)
+ row_indices[col] = *col_num;
+ }
+
+ void copy_row (const dealii::SparsityPattern &csp,
- for (unsigned int col=0; col_num != csp.end (row); ++col_num, ++col)
++ const size_type row,
++ std::vector<int_type> &row_indices)
+ {
+ dealii::SparsityPattern::iterator col_num = csp.begin (row);
++ for (size_type col=0; col_num != csp.end (row); ++col_num, ++col)
+ row_indices[col] = col_num->column();
+ }
+ }
+ }
+
+
+
template <typename SparsityType>
void
SparsityPattern::reinit (const Epetra_Map &input_row_map,
continue;
row_indices.resize (row_length, -1);
-
- typename SparsityType::row_iterator col_num = sp.row_begin (row),
- row_end = sp.row_end(row);
- for (size_type col = 0; col_num != row_end; ++col_num, ++col)
- row_indices[col] = *col_num;
-
- graph->InsertGlobalIndices (1, reinterpret_cast<int_type *>(&row),
+ internal::copy_row(sp, row, row_indices);
- graph->InsertGlobalIndices (1, reinterpret_cast<int *>(&row),
++ graph->InsertGlobalIndices (1, reinterpret_cast<int_type *>(&row),
row_length, &row_indices[0]);
}
// treated in the other functions.
matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
- // the next thing is to set right hand
- // side to the wanted value. there's one
- // drawback: if we write to individual
- // vector elements, then we have to do
- // that on all processors. however, some
- // processors may not need to set
- // anything because their chunk of
- // matrix/rhs do not contain any boundary
- // nodes. therefore, rather than using
- // individual calls, we use one call for
- // all elements, thereby making sure that
- // all processors call this function,
- // even if some only have an empty set of
- // elements to set
- right_hand_side.compress ();
- solution.compress ();
-
- std::vector<unsigned int> indices;
+ std::vector<types::global_dof_index> indices;
std::vector<PetscScalar> solution_values;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
// matrix classes in deal.II.
matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
- // the next thing is to set right
- // hand side to the wanted
- // value. there's one drawback:
- // if we write to individual
- // vector elements, then we have
- // to do that on all
- // processors. however, some
- // processors may not need to set
- // anything because their chunk
- // of matrix/rhs do not contain
- // any boundary nodes. therefore,
- // rather than using individual
- // calls, we use one call for all
- // elements, thereby making sure
- // that all processors call this
- // function, even if some only
- // have an empty set of elements
- // to set
- right_hand_side.compress ();
- solution.compress ();
-
- std::vector<unsigned int> indices;
+ std::vector<types::global_dof_index> indices;
std::vector<TrilinosScalar> solution_values;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
dof = boundary_values.begin();
dof != boundary_values.end();
++dof)