* This can be achieved by calling
* clear(), for example.
*/
- void set_size (const unsigned int size);
+ void set_size (const types::global_dof_index size);
/**
* Return the size of the index
* set. The latter information is
* returned by n_elements().
*/
- unsigned int size () const;
+ types::global_dof_index size () const;
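
As a usage note for the widened interface, a minimal sketch (header paths, the using-directive, and the function name are illustrative assumptions, not part of the patch): size() reports the extent of the index space as a types::global_dof_index, while n_elements() counts the indices actually contained in the set.

#include <deal.II/base/index_set.h>
#include <deal.II/base/types.h>

using namespace dealii;

// Illustrative sketch: size() is the extent of the index space,
// n_elements() the number of indices stored in the set.  With
// DEAL_II_USE_LARGE_INDEX_TYPE both values may exceed 2^32-1.
void index_set_size_sketch ()
{
  IndexSet is;
  is.set_size (100);                                            // extent of the index space
  is.add_range (0, 10);                                         // store the half-open range [0,10)

  const types::global_dof_index space_size = is.size ();        // == 100
  const types::global_dof_index n_stored   = is.n_elements ();  // == 10
  (void)space_size;
  (void)n_stored;
}
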
/**
* Add the half-open range
* set have to have a smaller
* number than this value.
*/
- unsigned int index_space_size;
+ types::global_dof_index index_space_size;
/**
* This integer caches the index of the
inline
IndexSet::Range::Range ()
:
- begin(numbers::invalid_unsigned_int),
- end(numbers::invalid_unsigned_int)
+ begin(numbers::invalid_dof_index),
+ end(numbers::invalid_dof_index)
{}
inline
void
-IndexSet::set_size (const unsigned int sz)
+IndexSet::set_size (const types::global_dof_index sz)
{
Assert (ranges.empty(),
ExcMessage ("This function can only be called if the current "
inline
-unsigned int
+types::global_dof_index
IndexSet::size () const
{
return index_space_size;
else
{
Assert (false, ExcInternalError());
- return numbers::invalid_unsigned_int;
+ return numbers::invalid_dof_index;
}
}
* but tailored to be iterated over, and some
* indices may be duplicates.
*/
- const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+ const std::vector<std::pair<types::global_dof_index, types::global_dof_index> > &
import_indices() const;
/**
* but tailored to be iterated over, and some
* indices may be duplicates.
*/
- std::vector<std::pair<unsigned int, types::global_dof_index> > import_indices_data;
+ std::vector<std::pair<types::global_dof_index, types::global_dof_index> > import_indices_data;
/**
* Caches the number of ghost indices. It
inline
- const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+ const std::vector<std::pair<types::global_dof_index, types::global_dof_index> > &
Partitioner::import_indices() const
{
return import_indices_data;
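
Each pair returned by import_indices() is a half-open range [first, second) of indices local to the owned range, which is how the vector-update loops later in this patch iterate it. A minimal caller-side sketch, assuming the Utilities::MPI::Partitioner interface shown here (header path and function name are illustrative):

#include <deal.II/base/partitioner.h>
#include <utility>
#include <vector>

using namespace dealii;

// Count how many entries will be imported: each pair in
// import_indices() is a half-open range [first, second) of local
// indices, possibly containing duplicates.
types::global_dof_index
count_import_entries_sketch (const Utilities::MPI::Partitioner &part)
{
  types::global_dof_index n_imported = 0;
  std::vector<std::pair<types::global_dof_index,
                        types::global_dof_index> >::const_iterator
    it = part.import_indices().begin();
  for ( ; it != part.import_indices().end(); ++it)
    n_imported += it->second - it->first;     // length of this range
  return n_imported;
}
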
*/
const unsigned int artificial_subdomain_id DEAL_II_DEPRECATED = static_cast<subdomain_id>(-2);
-//#define DEAL_II_USE_LARGE_INDEX_TYPE
+#define DEAL_II_USE_LARGE_INDEX_TYPE
#ifdef DEAL_II_USE_LARGE_INDEX_TYPE
/**
* The type used for global indices of
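
For orientation, the now-enabled macro switches the width of types::global_dof_index, and with it the value of numbers::invalid_dof_index, which is the largest representable index of that type; this is why comparisons throughout the patch move from invalid_unsigned_int to invalid_dof_index. A simplified sketch of the selection, not the literal contents of base/types.h:

// Simplified sketch of the type selection controlled by
// DEAL_II_USE_LARGE_INDEX_TYPE (illustrative, not verbatim deal.II code):
namespace types
{
#ifdef DEAL_II_USE_LARGE_INDEX_TYPE
  // 64-bit global DoF indices: problems with more than ~4.3e9 unknowns.
  typedef unsigned long long int global_dof_index;
#else
  // Default: 32-bit global DoF indices.
  typedef unsigned int global_dof_index;
#endif
}

namespace numbers
{
  // The invalid marker is the largest representable value of the index
  // type, so comparisons must use invalid_dof_index rather than
  // invalid_unsigned_int once the type can be 64 bits wide.
  const types::global_dof_index invalid_dof_index =
    static_cast<types::global_dof_index>(-1);
}
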
Assert (fe_index < dof_handler.finite_elements->size(),
ExcInternalError());
Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
+ numbers::invalid_dof_index,
ExcMessage ("This vertex is unused and has no DoFs associated with it"));
// hop along the list of index
// part. trigger an exception if
// we can't find a set for this
// particular fe_index
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
+ const types::global_dof_index starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
while (true)
{
ExcIndexRange (vertex_index, 0,
dof_handler.vertex_dofs_offsets.size()));
Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
+ numbers::invalid_dof_index,
ExcMessage ("This vertex is unused and has no DoFs associated with it"));
// hop along the list of index
"this DoFHandler"));
// if this vertex is unused, return 0
- if (dof_handler.vertex_dofs_offsets[vertex_index] == numbers::invalid_unsigned_int)
+ if (dof_handler.vertex_dofs_offsets[vertex_index] == numbers::invalid_dof_index)
return 0;
// hop along the list of index
// sets and count the number of
// hops
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
+ const types::global_dof_index starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
- Assert (*pointer != numbers::invalid_size_type,
+ Assert (*pointer != numbers::invalid_dof_index,
ExcInternalError());
unsigned int counter = 0;
// make sure we don't ask on
// unused vertices
Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
+ numbers::invalid_dof_index,
ExcInternalError());
// hop along the list of index
// sets and count the number of
// hops
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
+ const types::global_dof_index starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
- Assert (*pointer != numbers::invalid_unsigned_int,
+ Assert (*pointer != numbers::invalid_dof_index,
ExcInternalError());
unsigned int counter = 0;
// make sure we don't ask on
// unused vertices
Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
+ numbers::invalid_dof_index,
ExcInternalError());
// hop along the list of index
// sets and see whether we find
// the given index
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
+ const types::global_dof_index starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
- Assert (*pointer != numbers::invalid_unsigned_int,
+ Assert (*pointer != numbers::invalid_dof_index,
ExcInternalError());
while (true)
{
Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
- Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
+ Assert((*pointer)<std::numeric_limits<types::global_dof_index>::max(), ExcInternalError());
const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
Assert (this_fe_index < dof_handler.finite_elements->size(),
const unsigned int fe_index)
{
const DH &handler = accessor.get_dof_handler();
- Assert(handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
const FiniteElement<DH::dimension, DH::space_dimension> &fe
const unsigned int fe_index)
{
const DH &handler = accessor.get_dof_handler();
- Assert(handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
const FiniteElement<DH::dimension, DH::space_dimension> &fe = handler.get_fe ()[fe_index];
const unsigned int fe_index)
{
const DH &handler = accessor.get_dof_handler();
- Assert(handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
const FiniteElement<DH::dimension, DH::space_dimension> &fe = handler.get_fe ()[fe_index];
* If no level degrees of
* freedom have been assigned
* to this level, returns
- * numbers::invalid_unsigned_int. Else
+ * numbers::invalid_dof_index. Else
* returns the number of
* degrees of freedom on this level.
*/
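
This return convention is what the Assert(... != numbers::invalid_dof_index, ExcNotInitialized()) checks further down rely on. A caller-side sketch (the function name is illustrative):

#include <deal.II/base/types.h>
#include <deal.II/dofs/dof_handler.h>

using namespace dealii;

// Level DoFs are usable only if n_dofs(level) does not return the
// invalid_dof_index marker, i.e. if level DoFs have been distributed.
template <int dim>
bool level_dofs_available_sketch (const DoFHandler<dim> &dof_handler,
                                  const unsigned int     level)
{
  return dof_handler.n_dofs (level) != numbers::invalid_dof_index;
}
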
* actual data format used to
* the present class.
*/
- std::vector<unsigned int> vertex_dofs_offsets;
+ std::vector<types::global_dof_index> vertex_dofs_offsets;
std::vector<MGVertexDoFs> mg_vertex_dofs;
types::global_dof_index
DoFHandler<dim,spacedim>::n_dofs (const unsigned int) const
{
- return numbers::invalid_unsigned_int;
+ return numbers::invalid_dof_index;
}
// make sure we are on an
// object for which DoFs have
// been allocated at all
- Assert (dof_offsets[obj_index] != numbers::invalid_unsigned_int,
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
ExcMessage ("You are trying to access degree of freedom "
"information for an object on which no such "
"information is available"));
const types::global_dof_index *pointer = &dofs[starting_offset];
while (true)
{
- Assert (*pointer != numbers::invalid_unsigned_int,
+ Assert (*pointer != numbers::invalid_dof_index,
ExcInternalError());
if (*pointer == fe_index)
return *(pointer + 1 + local_index);
// make sure we are on an
// object for which DoFs have
// been allocated at all
- Assert (dof_offsets[obj_index] != numbers::invalid_unsigned_int,
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
ExcMessage ("You are trying to access degree of freedom "
"information for an object on which no such "
"information is available"));
types::global_dof_index *pointer = &dofs[starting_offset];
while (true)
{
- Assert (*pointer != numbers::invalid_unsigned_int,
+ Assert (*pointer != numbers::invalid_dof_index,
ExcInternalError());
if (*pointer == fe_index)
{
// make sure we are on an
// object for which DoFs have
// been allocated at all
- if (dof_offsets[obj_index] == numbers::invalid_unsigned_int)
+ if (dof_offsets[obj_index] == numbers::invalid_dof_index)
return 0;
// if we are on a cell, then the
unsigned int counter = 0;
while (true)
{
- if (*pointer == numbers::invalid_unsigned_int)
+ if (*pointer == numbers::invalid_dof_index)
// end of list reached
return counter;
else
// make sure we are on an
// object for which DoFs have
// been allocated at all
- Assert (dof_offsets[obj_index] != numbers::invalid_unsigned_int,
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
ExcMessage ("You are trying to access degree of freedom "
"information for an object on which no such "
"information is available"));
unsigned int counter = 0;
while (true)
{
- Assert (*pointer != numbers::invalid_unsigned_int,
+ Assert (*pointer != numbers::invalid_dof_index,
ExcInternalError());
const unsigned int fe_index = *pointer;
// make sure we are on an
// object for which DoFs have
// been allocated at all
- Assert (dof_offsets[obj_index] != numbers::invalid_unsigned_int,
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
ExcMessage ("You are trying to access degree of freedom "
"information for an object on which no such "
"information is available"));
const types::global_dof_index *pointer = &dofs[starting_offset];
while (true)
{
- if (*pointer == numbers::invalid_unsigned_int)
+ if (*pointer == numbers::invalid_dof_index)
// end of list reached
return false;
else if (*pointer == fe_index)
current_block = parent.n_blocks();
index_within_block = 0;
next_break_backward = global_index;
- next_break_forward = numbers::invalid_unsigned_int;
+ next_break_forward = numbers::invalid_size_type;
};
}
// then move the next
// boundary arbitrarily far
// away
- next_break_forward = numbers::invalid_unsigned_int;
+ next_break_forward = numbers::invalid_size_type;
};
++global_index;
// get into unspecified terrain
{
--current_block;
- index_within_block = numbers::invalid_unsigned_int;
+ index_within_block = numbers::invalid_size_type;
next_break_forward = 0;
next_break_backward = 0;
};
* contains the position of the
* ConstraintLine of a constrained degree
* of freedom, or
- * numbers::invalid_unsigned_int if the
+ * numbers::invalid_size_type if the
* degree of freedom is not
* constrained. The
- * numbers::invalid_unsigned_int
- * return value returns thus whether
+ * numbers::invalid_size_type
+ * return value thus indicates whether
* there is a constraint line for a given
* degree of freedom index. Note that
* this class has no notion of how many
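
Spelled out, the lookup this comment describes (a simplified standalone sketch with illustrative names; the real code appears in the hunks below): the cache entry for a line index is either the position of its ConstraintLine or the invalid marker.

#include <cstddef>
#include <vector>

// Sketch of the lines_cache convention described above: a constrained
// degree of freedom stores the position of its ConstraintLine in
// 'lines', an unconstrained one stores invalid_size_type.
typedef std::size_t size_type;
const size_type invalid_size_type = static_cast<size_type>(-1);

bool is_constrained_sketch (const std::vector<size_type> &lines_cache,
                            const size_type line_index)
{
  return line_index < lines_cache.size() &&
         lines_cache[line_index] != invalid_size_type;
}
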
// several terabytes of memory to
// resize the various arrays below
// :-)
- Assert (line != numbers::invalid_unsigned_int,
+ Assert (line != numbers::invalid_size_type,
ExcInternalError());
const size_type line_index = calculate_line_index (line);
// in any case: exit the function if an
// entry for this column already exists,
// since we don't want to enter it twice
- Assert (lines_cache[calculate_line_index(line)] != numbers::invalid_unsigned_int,
+ Assert (lines_cache[calculate_line_index(line)] != numbers::invalid_size_type,
ExcInternalError());
ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
Assert (line_ptr->line == line, ExcInternalError());
{
const size_type line_index = calculate_line_index(line);
Assert( line_index < lines_cache.size() &&
- lines_cache[line_index] != numbers::invalid_unsigned_int,
+ lines_cache[line_index] != numbers::invalid_size_type,
ExcMessage("call add_line() before calling set_inhomogeneity()"));
Assert(lines_cache[line_index] < lines.size(), ExcInternalError());
ConstraintLine *line_ptr = &lines[lines_cache[line_index]];
// that means computing the line index twice
const size_type line_index = calculate_line_index(index);
if (line_index >= lines_cache.size() ||
- lines_cache[line_index] == numbers::invalid_unsigned_int)
+ lines_cache[line_index] == numbers::invalid_size_type)
return false;
else
{
// that means computing the line index twice
const size_type line_index = calculate_line_index(line);
if (line_index >= lines_cache.size() ||
- lines_cache[line_index] == numbers::invalid_unsigned_int)
+ lines_cache[line_index] == numbers::invalid_size_type)
return 0;
else
return &lines[lines_cache[line_index]].entries;
// that means computing the line index twice
const size_type line_index = calculate_line_index(line);
if (line_index >= lines_cache.size() ||
- lines_cache[line_index] == numbers::invalid_unsigned_int)
+ lines_cache[line_index] == numbers::invalid_size_type)
return 0;
else
return lines[lines_cache[line_index]].inhomogeneity;
inline
MatrixBlock<MATRIX>::MatrixBlock()
:
- row(deal_II_numbers::invalid_unsigned_int),
- column(deal_II_numbers::invalid_unsigned_int)
+ row(deal_II_numbers::invalid_size_type),
+ column(deal_II_numbers::invalid_size_type)
{}
Assert (ierr == MPI_SUCCESS, ExcInternalError());
Number *read_position = import_data;
- std::vector<std::pair<unsigned int, size_type> >::const_iterator
+ std::vector<std::pair<size_type, size_type> >::const_iterator
my_imports = part.import_indices().begin();
// If add_ghost_data is set, add the imported
{
Assert (import_data != 0, ExcInternalError());
Number *write_position = import_data;
- std::vector<std::pair<unsigned int, size_type> >::const_iterator
+ std::vector<std::pair<size_type, size_type> >::const_iterator
my_imports = part.import_indices().begin();
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
for (size_type j=my_imports->first; j<my_imports->second; j++)
{
const size_type column = entry->column();
const size_type col_cell = additional_data->block_list.row_position(block, column);
- if (col_cell != numbers::invalid_unsigned_int)
+ if (col_cell != numbers::invalid_size_type)
M_cell(row_cell, col_cell) = entry->value();
}
}
const size_type N = this->m();
size_type jrow = 0;
- std::vector<size_type> iw (N, numbers::invalid_unsigned_int);
+ std::vector<size_type> iw (N, numbers::invalid_size_type);
for (size_type k=0; k<N; ++k)
{
for (; jj<ia[jrow+1]; ++jj)
{
const size_type jw = iw[ja[jj]];
- if (jw != numbers::invalid_unsigned_int)
+ if (jw != numbers::invalid_size_type)
luval[jw] -= t1 * luval[jj];
}
luval[ia[k]] = 1./luval[ia[k]];
for (size_type j=j1; j<=j2; ++j)
- iw[ja[j]] = numbers::invalid_unsigned_int;
+ iw[ja[j]] = numbers::invalid_size_type;
}
}
// if we have no constraints, should take the data from dof_indices
if (row_length_indicators(row) == 0)
{
- Assert (row_starts_plain_indices[row] == numbers::invalid_unsigned_int,
+ Assert (row_starts_plain_indices[row] == numbers::invalid_dof_index,
ExcInternalError());
return begin_indices(row);
}
VectorDoFTuple ()
{
for (unsigned int i=0; i<dim; ++i)
- dof_indices[i] = numbers::invalid_unsigned_int;
+ dof_indices[i] = numbers::invalid_dof_index;
}
// space and compress contiguous indices in
// form of ranges
{
- unsigned int last_index = numbers::invalid_unsigned_int-1;
- std::vector<std::pair<unsigned int,types::global_dof_index> > compressed_import_indices;
+ types::global_dof_index last_index = numbers::invalid_dof_index-1;
+ std::vector<std::pair<types::global_dof_index,types::global_dof_index> > compressed_import_indices;
for (unsigned int i=0; i<n_import_indices_data; i++)
{
Assert (expanded_import_indices[i] >= local_range_data.first &&
expanded_import_indices[i] < local_range_data.second,
ExcIndexRange(expanded_import_indices[i], local_range_data.first,
local_range_data.second));
- unsigned int new_index = (expanded_import_indices[i] -
+ types::global_dof_index new_index = (expanded_import_indices[i] -
local_range_data.first);
if (new_index == last_index+1)
compressed_import_indices.back().second++;
else
{
compressed_import_indices.push_back
- (std::pair<unsigned int,types::global_dof_index>(new_index,new_index+1));
+ (std::pair<types::global_dof_index,types::global_dof_index>(new_index,new_index+1));
}
last_index = new_index;
}
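
The loop above compresses a sorted list of locally owned import indices into half-open ranges. As a standalone illustration of the same idea in plain C++ (names are illustrative, independent of deal.II):

#include <cstddef>
#include <utility>
#include <vector>

// Compress a sorted index list into half-open ranges [first, second):
// an index adjacent to the current range extends it, anything else
// (including a gap or a duplicate) starts a new range, matching the
// construction of compressed_import_indices above.
typedef unsigned long long index_type;

std::vector<std::pair<index_type, index_type> >
compress_to_ranges_sketch (const std::vector<index_type> &sorted_indices)
{
  std::vector<std::pair<index_type, index_type> > ranges;
  for (std::size_t i = 0; i < sorted_indices.size(); ++i)
    {
      const index_type idx = sorted_indices[i];
      if (!ranges.empty() && idx == ranges.back().second)
        ++ranges.back().second;                            // extend current range
      else
        ranges.push_back (std::make_pair (idx, idx + 1));  // start a new range
    }
  return ranges;
}

// Example: {0,1,2, 7,8, 10}  yields  {[0,3), [7,9), [10,11)}
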
const bool reversed_numbering,
const std::vector<types::global_dof_index> &starting_indices)
{
- Assert(dof_handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(dof_handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
//TODO: we should be doing the same here as in the other compute_CMK function to preserve some memory
dof_handler.renumber_dofs (renumbering);
// for (unsigned int level=0;level<dof_handler.get_tria().n_levels();++level)
- // if (dof_handler.n_dofs(level) != numbers::invalid_unsigned_int)
+ // if (dof_handler.n_dofs(level) != numbers::invalid_dof_index)
// component_wise(dof_handler, level, component_order_arg);
}
const unsigned int level,
const std::vector<unsigned int> &component_order_arg)
{
- Assert(dof_handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(dof_handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(level),
all_dof_counts(fe_collection.n_components() *
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
- Assert (sizeof(types::global_dof_index) == sizeof(unsigned int),
- ExcNotImplemented());
MPI_Allgather ( &local_dof_count[0], n_buckets, MPI_UNSIGNED, &all_dof_counts[0],
n_buckets, MPI_UNSIGNED, tria->get_communicator());
void
block_wise (DoFHandler<dim> &dof_handler, const unsigned int level)
{
- Assert(dof_handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(dof_handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(level),
// make sure that all local DoFs got new numbers assigned
Assert (std::find (renumbering.begin(), renumbering.end(),
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
== renumbering.end(),
ExcInternalError());
const std::vector<bool> &selected_dofs,
const unsigned int level)
{
- Assert(dof_handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(dof_handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(level),
const std::vector<bool> &selected_dofs,
const unsigned int level)
{
- Assert(dof_handler.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(dof_handler.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
const unsigned int n_dofs = dof_handler.n_dofs(level);
const unsigned int level,
const typename std::vector<typename DH::level_cell_iterator> &cells)
{
- Assert(dof.n_dofs(level) != numbers::invalid_unsigned_int,
+ Assert(dof.n_dofs(level) != numbers::invalid_dof_index,
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering(dof.n_dofs(level));
// same subdomain, then they will be in
// this order also after reordering
std::fill (new_dof_indices.begin(), new_dof_indices.end(),
- numbers::invalid_unsigned_int);
+ numbers::invalid_dof_index);
types::global_dof_index next_free_index = 0;
for (types::subdomain_id subdomain=0; subdomain<n_subdomains; ++subdomain)
for (types::global_dof_index i=0; i<n_dofs; ++i)
if (subdomain_association[i] == subdomain)
{
- Assert (new_dof_indices[i] == numbers::invalid_unsigned_int,
+ Assert (new_dof_indices[i] == numbers::invalid_dof_index,
ExcInternalError());
new_dof_indices[i] = next_free_index;
++next_free_index;
// we should have numbered all dofs
Assert (next_free_index == n_dofs, ExcInternalError());
Assert (std::find (new_dof_indices.begin(), new_dof_indices.end(),
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
== new_dof_indices.end(),
ExcInternalError());
}
// check for a couple conditions that happened in parallel
// distributed mode
for (unsigned int row=0; row!=n_slave_dofs; ++row)
- Assert (slave_dofs[row] != numbers::invalid_unsigned_int,
+ Assert (slave_dofs[row] != numbers::invalid_dof_index,
ExcInternalError());
for (unsigned int col=0; col!=n_master_dofs; ++col)
- Assert (master_dofs[col] != numbers::invalid_unsigned_int,
+ Assert (master_dofs[col] != numbers::invalid_dof_index,
ExcInternalError());
// already fill the
// vertex_dofs_offsets field
dof_handler.vertex_dofs_offsets.resize (dof_handler.tria->n_vertices(),
- numbers::invalid_unsigned_int);
+ numbers::invalid_dof_index);
unsigned int vertex_slots_needed = 0;
for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
if (dof_handler.tria->vertex_used(v) == true)
{
- unsigned int pointer = dof_handler.vertex_dofs_offsets[v];
+ types::global_dof_index pointer = dof_handler.vertex_dofs_offsets[v];
for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
if (vertex_fe_association[fe][v] == true)
{
}
// finally place the end
// marker
- dof_handler.vertex_dofs[pointer] = numbers::invalid_unsigned_int;
+ dof_handler.vertex_dofs[pointer] = numbers::invalid_dof_index;
}
}
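
The end marker written here is what the "hop along the list" loops earlier in this patch test for. Schematically, the per-vertex storage they traverse looks as follows (a simplified sketch with illustrative names, assuming for brevity that all elements have the same number of DoFs per vertex):

#include <vector>

// Per-vertex DoF storage in the hp case, as traversed above: starting
// at vertex_dofs_offsets[v], vertex_dofs holds one block per active
// finite element, namely the fe_index followed by that element's
// vertex DoFs, and the list is terminated by invalid_dof_index.
typedef unsigned long long global_dof_index;
const global_dof_index invalid_dof_index =
  static_cast<global_dof_index>(-1);

global_dof_index
vertex_dof_sketch (const std::vector<global_dof_index> &vertex_dofs,
                   const global_dof_index offset,           // vertex_dofs_offsets[v]
                   const unsigned int fe_index,
                   const unsigned int dofs_per_vertex,      // assumed equal for all elements
                   const unsigned int local_index)
{
  const global_dof_index *pointer = &vertex_dofs[offset];
  while (*pointer != invalid_dof_index)       // end-of-list marker
    {
      if (*pointer == fe_index)
        return *(pointer + 1 + local_index);  // DoFs follow the fe_index
      pointer += 1 + dofs_per_vertex;         // skip to the next block
    }
  return invalid_dof_index;                   // fe_index not active on this vertex
}
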
// line_dofs_offsets field
dof_handler.faces->lines.dof_offsets
.resize (dof_handler.tria->n_raw_lines(),
- numbers::invalid_unsigned_int);
+ numbers::invalid_dof_index);
unsigned int line_slots_needed = 0;
for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
}
// finally place the end
// marker
- dof_handler.faces->lines.dofs[pointer] = numbers::invalid_unsigned_int;
+ dof_handler.faces->lines.dofs[pointer] = numbers::invalid_dof_index;
}
}
identities[i].second);
Assert ((new_dof_indices[higher_dof_index] ==
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
||
(new_dof_indices[higher_dof_index] ==
lower_dof_index),
// slave to
// master
if (new_dof_indices[master_dof_index] !=
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
{
Assert (new_dof_indices[new_dof_indices[master_dof_index]] ==
- numbers::invalid_unsigned_int,
+ numbers::invalid_dof_index,
ExcInternalError());
new_dof_indices[slave_dof_index]
else
{
Assert ((new_dof_indices[master_dof_index] ==
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
||
(new_dof_indices[slave_dof_index] ==
master_dof_index),
= line->dof_index (identities[i].second, other_fe_index);
Assert ((new_dof_indices[master_dof_index] ==
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
||
(new_dof_indices[slave_dof_index] ==
master_dof_index),
= quad->dof_index (identities[i].second, other_fe_index);
Assert ((new_dof_indices[master_dof_index] ==
- numbers::invalid_unsigned_int)
+ numbers::invalid_dof_index)
||
(new_dof_indices[slave_dof_index] ==
master_dof_index),
// lower-dimensional objects
// where elements come together
std::vector<types::global_dof_index>
- constrained_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
+ constrained_indices (number_cache.n_global_dofs, numbers::invalid_dof_index);
compute_vertex_dof_identities (constrained_indices);
compute_line_dof_identities (constrained_indices);
compute_quad_dof_identities (constrained_indices);
// new numbers to those which are
// not constrained
std::vector<types::global_dof_index>
- new_dof_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
+ new_dof_indices (number_cache.n_global_dofs, numbers::invalid_dof_index);
types::global_dof_index next_free_dof = 0;
for (types::global_dof_index i=0; i<number_cache.n_global_dofs; ++i)
- if (constrained_indices[i] == numbers::invalid_unsigned_int)
+ if (constrained_indices[i] == numbers::invalid_dof_index)
{
new_dof_indices[i] = next_free_dof;
++next_free_dof;
// are constrained and record the
// new dof number for those:
for (types::global_dof_index i=0; i<number_cache.n_global_dofs; ++i)
- if (constrained_indices[i] != numbers::invalid_unsigned_int)
+ if (constrained_indices[i] != numbers::invalid_dof_index)
{
Assert (new_dof_indices[constrained_indices[i]] !=
- numbers::invalid_unsigned_int,
+ numbers::invalid_dof_index,
ExcInternalError());
new_dof_indices[i] = new_dof_indices[constrained_indices[i]];
for (types::global_dof_index i=0; i<number_cache.n_global_dofs; ++i)
{
- Assert (new_dof_indices[i] != numbers::invalid_unsigned_int,
+ Assert (new_dof_indices[i] != numbers::invalid_dof_index,
ExcInternalError());
Assert (new_dof_indices[i] < next_free_dof,
ExcInternalError());
const unsigned int) const
{
Assert (false, ExcNotImplemented());
- return numbers::invalid_unsigned_int;
+ return numbers::invalid_dof_index;
}
}
{
- std::vector<unsigned int> tmp;
+ std::vector<types::global_dof_index> tmp;
std::swap (vertex_dofs_offsets, tmp);
}
}
// entry exists
if (colnums[k] == j) return k-rowstart[i];
}
- return numbers::invalid_unsigned_int;
+ return numbers::invalid_size_type;
}
{
/**
* Given a connectivity graph and a list of indices (where
- * invalid_unsigned_int indicates that a node has not been numbered yet),
+ * invalid_size_type indicates that a node has not been numbered yet),
* pick a valid starting index among the as-yet unnumbered ones.
*/
size_type
const std::vector<size_type> &new_indices)
{
{
- size_type starting_point = numbers::invalid_unsigned_int;
+ size_type starting_point = numbers::invalid_size_type;
size_type min_coordination = sparsity.n_rows();
for (size_type row=0; row<sparsity.n_rows(); ++row)
// look over all as-yet unnumbered indices
- if (new_indices[row] == numbers::invalid_unsigned_int)
+ if (new_indices[row] == numbers::invalid_size_type)
{
SparsityPattern::iterator j = sparsity.begin(row);
//
// if that should be the case, we can choose an arbitrary dof as
// starting point, e.g. the first unnumbered one
- if (starting_point == numbers::invalid_unsigned_int)
+ if (starting_point == numbers::invalid_size_type)
{
for (size_type i=0; i<new_indices.size(); ++i)
- if (new_indices[i] == numbers::invalid_unsigned_int)
+ if (new_indices[i] == numbers::invalid_size_type)
{
starting_point = i;
break;
}
- Assert (starting_point != numbers::invalid_unsigned_int,
+ Assert (starting_point != numbers::invalid_size_type,
ExcInternalError());
}
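
Condensed, the seed selection just performed (an illustrative standalone sketch, not the deal.II implementation): among the rows still marked invalid_size_type, take the one with the fewest neighbors.

#include <cstddef>
#include <vector>

// Pick the starting row for the Cuthill-McKee front: among all rows not
// yet numbered (new_indices[row] == invalid_size_type), choose the one
// with the lowest coordination number, i.e. the fewest graph neighbors.
typedef std::size_t size_type;
const size_type invalid_size_type = static_cast<size_type>(-1);

size_type
find_starting_index_sketch (const std::vector<std::vector<size_type> > &graph,
                            const std::vector<size_type> &new_indices)
{
  size_type starting_point   = invalid_size_type;
  size_type min_coordination = invalid_size_type;
  for (size_type row = 0; row < graph.size(); ++row)
    if (new_indices[row] == invalid_size_type &&
        graph[row].size() < min_coordination)
      {
        min_coordination = graph[row].size();
        starting_point   = row;
      }
  return starting_point;    // invalid_size_type if all rows are numbered
}
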
// initialize the new_indices array with invalid values
std::fill (new_indices.begin(), new_indices.end(),
- numbers::invalid_unsigned_int);
+ numbers::invalid_size_type);
// delete disallowed elements
for (size_type i=0; i<last_round_dofs.size(); ++i)
- if ((last_round_dofs[i]==numbers::invalid_unsigned_int) ||
+ if ((last_round_dofs[i]==numbers::invalid_size_type) ||
(last_round_dofs[i]>=sparsity.n_rows()))
- last_round_dofs[i] = numbers::invalid_unsigned_int;
+ last_round_dofs[i] = numbers::invalid_size_type;
std::remove_if (last_round_dofs.begin(), last_round_dofs.end(),
std::bind2nd(std::equal_to<size_type>(),
- numbers::invalid_unsigned_int));
+ numbers::invalid_size_type));
// now if no valid points remain: find dof with lowest coordination number
if (last_round_dofs.empty())
// eliminate dofs which are
// already numbered
for (int s=next_round_dofs.size()-1; s>=0; --s)
- if (new_indices[next_round_dofs[s]] != numbers::invalid_unsigned_int)
+ if (new_indices[next_round_dofs[s]] != numbers::invalid_size_type)
next_round_dofs.erase (next_round_dofs.begin() + s);
// check whether there are
if (next_round_dofs.empty())
{
if (std::find (new_indices.begin(), new_indices.end(),
- numbers::invalid_unsigned_int)
+ numbers::invalid_size_type)
==
new_indices.end())
// no unnumbered
// front-marching-algorithm (which
// Cuthill-McKee actually is) has
// reached all points.
- Assert ((std::find (new_indices.begin(), new_indices.end(), numbers::invalid_unsigned_int)
+ Assert ((std::find (new_indices.begin(), new_indices.end(), numbers::invalid_size_type)
==
new_indices.end())
&&
temp_copy_indices.resize (0);
temp_copy_indices.resize (sizes[level][selected_block],
- numbers::invalid_unsigned_int);
+ numbers::invalid_dof_index);
// Compute coarse level right hand side
// by restricting from fine level.
const types::global_dof_index n_active_dofs =
std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
std::bind2nd(std::not_equal_to<types::global_dof_index>(),
- numbers::invalid_unsigned_int));
+ numbers::invalid_dof_index));
copy_indices[selected_block][level].resize (n_active_dofs);
types::global_dof_index counter = 0;
for (types::global_dof_index i=0; i<temp_copy_indices.size(); ++i)
- if (temp_copy_indices[i] != numbers::invalid_unsigned_int)
+ if (temp_copy_indices[i] != numbers::invalid_dof_index)
copy_indices[selected_block][level][counter++] =
std::pair<types::global_dof_index, unsigned int> (temp_copy_indices[i], i);
Assert (counter == n_active_dofs, ExcInternalError());
{
temp_copy_indices[block].resize (0);
temp_copy_indices[block].resize (sizes[level][block],
- numbers::invalid_unsigned_int);
+ numbers::invalid_dof_index);
}
// Compute coarse level right hand side
std::count_if (temp_copy_indices[block].begin(),
temp_copy_indices[block].end(),
std::bind2nd(std::not_equal_to<types::global_dof_index>(),
- numbers::invalid_unsigned_int));
+ numbers::invalid_dof_index));
copy_indices[block][level].resize (n_active_dofs);
types::global_dof_index counter = 0;
for (types::global_dof_index i=0; i<temp_copy_indices[block].size(); ++i)
- if (temp_copy_indices[block][i] != numbers::invalid_unsigned_int)
+ if (temp_copy_indices[block][i] != numbers::invalid_dof_index)
copy_indices[block][level][counter++] =
std::pair<types::global_dof_index, unsigned int>
(temp_copy_indices[block][i], i);
level_end = mg_dof.end_active(level);
temp_copy_indices.resize (0);
- temp_copy_indices.resize (mg_dof.n_dofs(level), numbers::invalid_unsigned_int);
+ temp_copy_indices.resize (mg_dof.n_dofs(level), numbers::invalid_dof_index);
// Compute coarse level right hand side
// by restricting from fine level.
const types::global_dof_index n_active_dofs =
std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
std::bind2nd(std::not_equal_to<types::global_dof_index>(),
- numbers::invalid_unsigned_int));
+ numbers::invalid_dof_index));
copy_to_and_from_indices[level].resize (n_active_dofs);
types::global_dof_index counter = 0;
for (types::global_dof_index i=0; i<temp_copy_indices.size(); ++i)
- if (temp_copy_indices[i] != numbers::invalid_unsigned_int)
+ if (temp_copy_indices[i] != numbers::invalid_dof_index)
copy_to_and_from_indices[level][counter++] =
std::pair<types::global_dof_index, unsigned int> (temp_copy_indices[i], i);
Assert (counter == n_active_dofs, ExcInternalError());
level_end = mg_dof.end_active(level);
temp_copy_indices.resize (0);
- temp_copy_indices.resize (mg_dof.n_dofs(level), numbers::invalid_unsigned_int);
+ temp_copy_indices.resize (mg_dof.n_dofs(level), numbers::invalid_dof_index);
// Compute coarse level right hand side
// by restricting from fine level.
const types::global_dof_index n_active_dofs =
std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
std::bind2nd(std::not_equal_to<types::global_dof_index>(),
- numbers::invalid_unsigned_int));
+ numbers::invalid_dof_index));
copy_indices[level].resize (n_active_dofs);
types::global_dof_index counter = 0;
for (types::global_dof_index i=0; i<temp_copy_indices.size(); ++i)
- if (temp_copy_indices[i] != numbers::invalid_unsigned_int)
+ if (temp_copy_indices[i] != numbers::invalid_dof_index)
copy_indices[level][counter++] =
std::pair<types::global_dof_index, unsigned int> (temp_copy_indices[i], i);
Assert (counter == n_active_dofs, ExcInternalError());
Threads::Mutex::ScopedLock lock (mutex);
for (unsigned int i=0; i<dofs_per_cell; ++i)
{
- if (dof_is_on_face[i] && dof_to_boundary_mapping[dofs[i]] != numbers::invalid_unsigned_int)
+ if (dof_is_on_face[i] && dof_to_boundary_mapping[dofs[i]] != numbers::invalid_dof_index)
{
for (unsigned int j=0; j<dofs_per_cell; ++j)
- if (dof_is_on_face[j] && dof_to_boundary_mapping[dofs[j]] != numbers::invalid_unsigned_int)
+ if (dof_is_on_face[j] && dof_to_boundary_mapping[dofs[j]] != numbers::invalid_dof_index)
{
Assert(numbers::is_finite(cell_matrix(i,j)), ExcNumberNotFinite());
matrix.add(dof_to_boundary_mapping[dofs[i]],