ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
Assert (line_ptr->line == line, ExcInternalError());
- // if in debug mode, check whether an
- // entry for this column already
- // exists and if its the same as
- // the one entered at present
+ // if in debug mode, check whether an entry for this column already
+ // exists and if it's the same as the one entered at present
//
- // in any case: skip this entry if
- // an entry for this column already
- // exists, since we don't want to
- // enter it twice
+ // in any case: skip this entry if an entry for this column already
+ // exists, since we don't want to enter it twice
- for (std::vector<std::pair<unsigned int,double> >::const_iterator
+ for (std::vector<std::pair<size_type,double> >::const_iterator
col_val_pair = col_val_pairs.begin();
col_val_pair!=col_val_pairs.end(); ++col_val_pair)
{
// sort the lines
std::sort (lines.begin(), lines.end());
- // update list of pointers and give the
- // vector a sharp size since we won't
- // modify the size any more after this
- // point.
+ // update list of pointers and give the vector a sharp size since we
+ // won't modify the size any more after this point.
{
- std::vector<unsigned int> new_lines (lines_cache.size(),
- numbers::invalid_unsigned_int);
- unsigned int counter = 0;
+ std::vector<size_type> new_lines (lines_cache.size(),
+ numbers::invalid_size_type);
+ size_type counter = 0;
for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
line!=lines.end(); ++line, ++counter)
new_lines[calculate_line_index(line->line)] = counter;
std::swap (lines_cache, new_lines);
}
- // in debug mode: check whether we really
- // set the pointers correctly.
+ // in debug mode: check whether we really set the pointers correctly.
- for (unsigned int i=0; i<lines_cache.size(); ++i)
- if (lines_cache[i] != numbers::invalid_unsigned_int)
+ for (size_type i=0; i<lines_cache.size(); ++i)
+ if (lines_cache[i] != numbers::invalid_size_type)
Assert (i == calculate_line_index(lines[lines_cache[i]].line),
ExcInternalError());
}
#endif
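
Review note: the hunk above is the heart of close()'s bookkeeping, so a compact illustration may help. The following standalone sketch mimics the sort-then-rebuild pattern with a toy SortedLine type and plain std::size_t (both invented here; deal.II's calculate_line_index additionally accounts for local_lines, which the sketch omits):

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

// hypothetical stand-in for ConstraintLine: sorted by its global index
struct SortedLine
{
  std::size_t line; // global index of the constrained DoF
  bool operator< (const SortedLine &other) const { return line < other.line; }
};

int main ()
{
  const std::size_t invalid = std::numeric_limits<std::size_t>::max();

  std::vector<SortedLine> lines = {{7}, {2}, {5}};
  std::vector<std::size_t> cache (10, invalid); // index -> position in 'lines'

  // sort the lines, then rebuild the cache exactly as in the hunk above
  std::sort (lines.begin(), lines.end());
  std::vector<std::size_t> new_cache (cache.size(), invalid);
  std::size_t counter = 0;
  for (std::vector<SortedLine>::const_iterator l = lines.begin();
       l != lines.end(); ++l, ++counter)
    new_cache[l->line] = counter;
  std::swap (cache, new_cache);

  // the debug-mode consistency check from the hunk
  for (std::size_t i = 0; i < cache.size(); ++i)
    if (cache[i] != invalid)
      assert (i == lines[cache[i]].line);
}
```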
- // replace references to dofs that
- // are themselves constrained. note
- // that because we may replace
- // references to other dofs that
- // may themselves be constrained to
- // third ones, we have to iterate
- // over all this until we replace
- // no chains of constraints any
- // more
+ // replace references to dofs that are themselves constrained. note that
+ // because we may replace references to other dofs that may themselves be
+ // constrained to third ones, we have to iterate over all this until we
+ // replace no chains of constraints any more
//
- // the iteration replaces
- // references to constrained
- // degrees of freedom by
- // second-order references. for
- // example if x3=x0/2+x2/2 and
- // x2=x0/2+x1/2, then the new list
- // will be x3=x0/2+x0/4+x1/4. note
- // that x0 appear twice. we will
- // throw this duplicate out in the
- // following step, where we sort
- // the list so that throwing out
- // duplicates becomes much more
- // efficient. also, we have to do
- // it only once, rather than in
- // each iteration
+ // the iteration replaces references to constrained degrees of freedom by
+ // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
+ // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appears
+ // twice. we will throw this duplicate out in the following step, where
+ // we sort the list so that throwing out duplicates becomes much more
+ // efficient. also, we have to do it only once, rather than in each
+ // iteration
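
To make the worked example in this comment concrete: one pass over x3's entries replaces the reference to the constrained x2 by x2's own constraint, scaled by the weight 1/2. A minimal sketch with a std::map standing in for the ConstraintMatrix storage (toy types, not the library's):

```cpp
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

using Entries = std::vector<std::pair<unsigned int, double>>; // (dof, weight)

int main ()
{
  std::map<unsigned int, Entries> constraints;
  constraints[3] = {{0, 0.5}, {2, 0.5}}; // x3 = x0/2 + x2/2
  constraints[2] = {{0, 0.5}, {1, 0.5}}; // x2 = x0/2 + x1/2

  Entries &line = constraints[3];
  std::size_t entry = 0;
  while (entry < line.size())
    if (line[entry].first != 3 &&            // cycle guard, as in the Assert
        constraints.count (line[entry].first))
      {
        // this entry is further constrained: splice in the chained
        // constraint, scaled by the current weight
        const unsigned int dof    = line[entry].first;
        const double       weight = line[entry].second;
        const Entries chained = constraints[dof];
        line.erase (line.begin() + entry);
        for (const auto &e : chained)
          line.push_back ({e.first, weight * e.second});
      }
    else
      ++entry;

  for (const auto &e : line)   // prints x0: 0.5, x0: 0.25, x1: 0.25
    std::printf ("x%u: %g\n", e.first, e.second); // x0 still duplicated
}
```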
- unsigned int iteration = 0;
+ size_type iteration = 0;
while (true)
{
bool chained_constraint_replaced = false;
#ifdef DEBUG
// we need to keep track of how many replacements we do in this line, because we can
// end up in a cycle A->B->C->A without the number of entries growing.
- unsigned int n_replacements = 0;
+ size_type n_replacements = 0;
#endif
-
-
- // loop over all entries of
- // this line (including
- // ones that we have
- // appended in this go
- // around) and see whether
- // they are further
- // constrained. ignore
- // elements that we don't
- // store on the current
- // processor
+ // loop over all entries of this line (including ones that we
+ // have appended in this go around) and see whether they are
+ // further constrained. ignore elements that we don't store on
+ // the current processor
- unsigned int entry = 0;
+ size_type entry = 0;
while (entry < line->entries.size())
if (((local_lines.size() == 0)
||
(local_lines.is_element(line->entries[entry].first)))
&&
is_constrained (line->entries[entry].first))
{
- // ok, this entry is
- // further
- // constrained:
+ // ok, this entry is further constrained:
chained_constraint_replaced = true;
- // look up the chain
- // of constraints for
- // this entry
+ // look up the chain of constraints for this entry
- const unsigned int dof_index = line->entries[entry].first;
- const double weight = line->entries[entry].second;
+ const size_type dof_index = line->entries[entry].first;
+ const double weight = line->entries[entry].second;
Assert (dof_index != line->line,
ExcMessage ("Cycle in constraints detected!"));
{
std::sort (line->entries.begin(), line->entries.end());
- // loop over the now sorted list and
- // see whether any of the entries
- // references the same dofs more than
- // once in order to find how many
- // non-duplicate entries we have. This
- // lets us allocate the correct amount
- // of memory for the constraint
- // entries.
+ // loop over the now sorted list and see whether any of the entries
+ // references the same dofs more than once in order to find how many
+ // non-duplicate entries we have. This lets us allocate the correct
+ // amount of memory for the constraint entries.
- unsigned int duplicates = 0;
- for (unsigned int i=1; i<line->entries.size(); ++i)
+ size_type duplicates = 0;
+ for (size_type i=1; i<line->entries.size(); ++i)
if (line->entries[i].first == line->entries[i-1].first)
duplicates++;
Assert (new_entries.size() == line->entries.size() - duplicates,
ExcInternalError());
- // make sure there are
- // really no duplicates
- // left and that the list
- // is still sorted
+ // make sure there are really no duplicates left and that the
+ // list is still sorted
- for (unsigned int j=1; j<new_entries.size(); ++j)
+ for (size_type j=1; j<new_entries.size(); ++j)
{
Assert (new_entries[j].first != new_entries[j-1].first,
ExcInternalError());
line->entries.swap (new_entries);
}
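
Since the list is sorted at this point, duplicates are adjacent and one linear pass both counts and merges them. A self-contained sketch of that pass (the helper merge_sorted_duplicates is invented for illustration; the library builds new_entries in a similar single sweep):

```cpp
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

using Entry = std::pair<unsigned int, double>; // (dof, weight)

// merge adjacent entries with equal dof indices; input must be sorted
static std::vector<Entry> merge_sorted_duplicates (std::vector<Entry> entries)
{
  std::vector<Entry> merged;
  for (std::size_t i = 0; i < entries.size(); ++i)
    if (!merged.empty() && merged.back().first == entries[i].first)
      merged.back().second += entries[i].second; // fold duplicate weight in
    else
      merged.push_back (entries[i]);
  return merged;
}

int main ()
{
  std::vector<Entry> entries = {{0, 0.5}, {0, 0.25}, {1, 0.25}};
  std::sort (entries.begin(), entries.end());

  // count duplicates the same way the loop above does
  std::size_t duplicates = 0;
  for (std::size_t i = 1; i < entries.size(); ++i)
    if (entries[i].first == entries[i - 1].first)
      ++duplicates;

  const std::vector<Entry> merged = merge_sorted_duplicates (entries);
  assert (merged.size() == entries.size() - duplicates);
  assert (merged[0].second == 0.75); // x0's two contributions folded together
}
```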
- // finally do the following
- // check: if the sum of
- // weights for the
- // constraints is close to
- // one, but not exactly
- // one, then rescale all
- // the weights so that they
- // sum up to 1. this adds a
- // little numerical
- // stability and avoids all
- // sorts of problems where
- // the actual value is
- // close to, but not quite
- // what we expected
+ // finally do the following check: if the sum of weights for the
+ // constraints is close to one, but not exactly one, then rescale all
+ // the weights so that they sum up to 1. this adds a little numerical
+ // stability and avoids all sorts of problems where the actual value
+ // is close to, but not quite what we expected
//
- // the case where the
- // weights don't quite sum
- // up happens when we
- // compute the
- // interpolation weights
- // "on the fly", i.e. not
- // from precomputed
- // tables. in this case,
- // the interpolation
- // weights are also subject
- // to round-off
+ // the case where the weights don't quite sum up happens when we
+ // compute the interpolation weights "on the fly", i.e. not from
+ // precomputed tables. in this case, the interpolation weights are
+ // also subject to round-off
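
As a concrete instance of the rescaling described above (and implemented just below): if round-off leaves the weights summing to 1 - 1e-14, the tolerance test fires and each weight is divided by the sum. Toy data, same tolerance:

```cpp
#include <cassert>
#include <cmath>
#include <vector>

int main ()
{
  std::vector<double> weights = {0.49999999999999, 0.5}; // sum = 1 - 1e-14

  double sum = 0;
  for (std::size_t i = 0; i < weights.size(); ++i)
    sum += weights[i];

  // close to one but not exactly one: rescale so the weights sum to 1
  if ((sum != 1.0) && (std::fabs (sum - 1.) < 1.e-13))
    for (std::size_t i = 0; i < weights.size(); ++i)
      weights[i] /= sum;

  double check = 0;
  for (std::size_t i = 0; i < weights.size(); ++i)
    check += weights[i];
  assert (std::fabs (check - 1.) < 1.e-15);
}
```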
double sum = 0;
- for (unsigned int i=0; i<line->entries.size(); ++i)
+ for (size_type i=0; i<line->entries.size(); ++i)
sum += line->entries[i].second;
if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
{
if (other_constraints.lines_cache.size() > lines_cache.size())
lines_cache.resize(other_constraints.lines_cache.size(),
- numbers::invalid_unsigned_int);
+ numbers::invalid_size_type);
- // first action is to fold into the present
- // object possible constraints in the
- // second object. we don't strictly need to
- // do this any more since the
- // ConstraintMatrix has learned to deal
- // with chains of constraints in the
- // close() function, but we have
- // traditionally done this and it's not
+ // first action is to fold into the present object possible constraints
+ // in the second object. we don't strictly need to do this any more since
+ // the ConstraintMatrix has learned to deal with chains of constraints in
+ // the close() function, but we have traditionally done this and it's not
// overly hard to do.
//
- // for this, loop over all
- // constraints and replace the
- // constraint lines with a new one
- // where constraints are replaced
- // if necessary.
+ // for this, loop over all constraints and replace the constraint lines
+ // with a new one where constraints are replaced if necessary.
ConstraintLine::Entries tmp;
for (std::vector<ConstraintLine>::iterator line=lines.begin();
line!=lines.end(); ++line)
{
tmp.clear ();
- for (unsigned int i=0; i<line->entries.size(); ++i)
+ for (size_type i=0; i<line->entries.size(); ++i)
{
- // if the present dof is not
- // constrained, or if we won't take
- // the constraint from the other
- // object, then simply copy it over
+ // if the present dof is not constrained, or if we won't take the
+ // constraint from the other object, then simply copy it over
if (other_constraints.is_constrained(line->entries[i].first) == false
||
((merge_conflict_behavior != right_object_wins)
++shift;
++next_constraint;
if (next_constraint == lines.end())
- // nothing more to do; finish rest
- // of loop
+ // nothing more to do; finish rest of loop
{
- for (unsigned int i=row+1; i<n_rows; ++i)
+ for (size_type i=row+1; i<n_rows; ++i)
new_line.push_back (i-shift);
break;
};
Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
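
The distribute array used throughout these condense() variants is a constant-time inverse map: distribute[i] holds the position of the constraint handling row i, or an invalid marker for regular rows. A minimal standalone version (sentinel chosen as the maximum value, mirroring numbers::invalid_size_type):

```cpp
#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

int main ()
{
  const std::size_t invalid = std::numeric_limits<std::size_t>::max();

  // constrained row indices, in the order the constraint lines are stored
  const std::vector<std::size_t> constrained_rows = {4, 1, 6};

  std::vector<std::size_t> distribute (8, invalid);
  for (std::size_t c = 0; c < constrained_rows.size(); ++c)
    distribute[constrained_rows[c]] = c;

  assert (distribute[1] == 1);       // row 1 handled by constraint line 1
  assert (distribute[3] == invalid); // row 3 is a regular row
}
```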
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
{
// regular line. loop over all valid cols. note that this
// changes the line we are presently working on: we add additional
entry->is_valid_entry());
++entry)
{
- const unsigned int column = entry->column();
+ const size_type column = entry->column();
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
- // distribute entry
- // at regular row
- // @p{row} and
- // irregular column
- // sparsity.colnums[j]
+ // distribute entry at regular row @p{row} and irregular
+ // column sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
sparsity.add (row,
for (SparsityPattern::iterator entry = sparsity.begin(row);
(entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry)
{
- const unsigned int column = entry->column();
- if (distribute[column] == numbers::invalid_unsigned_int)
+ const size_type column = entry->column();
+ if (distribute[column] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // sparsity.colnums[j]
+ // distribute entry at irregular row @p{row} and regular
+ // column sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // sparsity.get_column_numbers()[j]
+ // distribute entry at irregular row @p{row} and irregular
+ // column sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[column]].entries[q].first);
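
All four row/column cases above reduce to the same move: replace a constrained index by the indices its constraint couples to. The regular-row/irregular-column case, sketched with a set of (row, col) pairs standing in for the sparsity pattern (data layout invented for the sketch):

```cpp
#include <cassert>
#include <set>
#include <utility>
#include <vector>

using Pattern = std::set<std::pair<int, int>>;       // (row, col) entries
using Entries = std::vector<std::pair<int, double>>; // constraint targets

int main ()
{
  // constraint: x2 = 0.5*x0 + 0.5*x1, so column 2 couples to columns 0 and 1
  const Entries entries_of_x2 = {{0, 0.5}, {1, 0.5}};

  Pattern sparsity = {{3, 2}}; // regular row 3 references constrained column 2

  // distribute the entry at regular row 3 and irregular column 2
  for (std::size_t q = 0; q != entries_of_x2.size(); ++q)
    sparsity.insert ({3, entries_of_x2[q].first});

  assert (sparsity.count ({3, 0}) == 1); // coupling redirected to x0
  assert (sparsity.count ({3, 1}) == 1); // ... and to x1
}
```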
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
- // regular line. loop over
- // cols. note that as we
- // proceed to distribute
- // cols, the loop may get
- // longer
+ // regular line. loop over cols. note that as we proceed to
+ // distribute cols, the loop may get longer
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
- // distribute entry
- // at regular row
- // @p{row} and
- // irregular column
- // column. note that
- // this changes the
- // line we are
- // presently working
- // on: we add
- // additional
- // entries. if we add
- // another entry at a
- // column behind the
- // present one, we
- // will encounter it
- // later on (but
- // since it can't be
- // further
- // constrained, won't
- // have to do
- // anything about
- // it). if we add it
- // up front of the
- // present column, we
- // will find the
- // present column
- // later on again as
- // it was shifted
- // back (again
- // nothing happens,
- // in particular no
- // endless loop, as
- // when we encounter
- // it the second time
- // we won't be able
- // to add more
- // entries as they
- // all already exist,
- // but we do the same
- // work more often
- // than necessary,
- // and the loop gets
- // longer), so move
- // the cursor one to
- // the right in the
- // case that we add
- // an entry up front
- // that did not exist
- // before. check
- // whether it existed
- // before by tracking
- // the length of this
- // row
+ // distribute entry at regular row @p{row} and irregular
+ // column column. note that this changes the line we are
+ // presently working on: we add additional entries. if we
+ // add another entry at a column behind the present one, we
+ // will encounter it later on (but since it can't be
+ // further constrained, won't have to do anything about
+ // it). if we add it up front of the present column, we
+ // will find the present column later on again as it was
+ // shifted back (again nothing happens, in particular no
+ // endless loop, as when we encounter it the second time we
+ // won't be able to add more entries as they all already
+ // exist, but we do the same work more often than
+ // necessary, and the loop gets longer), so move the cursor
+ // one to the right in the case that we add an entry up
+ // front that did not exist before. check whether it
+ // existed before by tracking the length of this row
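
The cursor bookkeeping this comment describes is subtle, so here it is in isolation: if distributing inserts a column in front of the current position and that column did not exist before, the loop index must move one to the right to keep pointing at the same column. A toy sorted-row sketch (row and add_column are made up; the library tracks the same condition via row_length):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// insert 'col' into a sorted, duplicate-free row (a toy sparsity row)
static void add_column (std::vector<std::size_t> &row, std::size_t col)
{
  std::vector<std::size_t>::iterator it =
    std::lower_bound (row.begin(), row.end(), col);
  if (it == row.end() || *it != col)
    row.insert (it, col);
}

int main ()
{
  std::vector<std::size_t> row = {2, 7, 9};
  std::size_t j = 1;                       // cursor on column 7
  const std::size_t column = row[j];

  const std::size_t old_rowlength = row.size();
  const std::size_t new_col = 3;           // entry the constraint distributes to
  add_column (row, new_col);

  // added up front of the present column, and it did not exist before:
  // move the cursor one to the right so it still points at 'column'
  if ((new_col < column) && (row.size() != old_rowlength))
    ++j;

  assert (row[j] == column);
}
```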
- unsigned int old_rowlength = sparsity.row_length(row);
- for (unsigned int q=0;
+ size_type old_rowlength = sparsity.row_length(row);
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
{
}
else
// row must be distributed
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // sparsity.colnums[j]
+ // distribute entry at irregular row @p{row} and regular
+ // column sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // sparsity.get_column_numbers()[j]
+ // distribute entry at irregular row @p{row} and irregular
+ // column sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[sparsity.column_number(row,j)]]
.entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
{
- // regular line. loop over
- // cols. note that as we proceed to
- // distribute cols, the loop may
- // get longer
+ // regular line. loop over cols. note that as we proceed to
+ // distribute cols, the loop may get longer
CompressedSetSparsityPattern::row_iterator col_num = sparsity.row_begin (row);
for (; col_num != sparsity.row_end (row); ++col_num)
{
- const unsigned int column = *col_num;
+ const size_type column = *col_num;
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // sparsity.colnums[j]
+ // distribute entry at irregular row @p{row} and regular
+ // column sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // sparsity.get_column_numbers()[j]
+ // distribute entry at irregular row @p{row} and irregular
+ // column sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[column]]
.entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute(sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute(sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
- // regular line. loop over
- // cols. note that as we
- // proceed to distribute
- // cols, the loop may get
- // longer
+ // regular line. loop over cols. note that as we proceed to
+ // distribute cols, the loop may get longer
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] != numbers::invalid_unsigned_int)
+ if (distribute[column] != numbers::invalid_size_type)
{
- // distribute entry
- // at regular row
- // @p{row} and
- // irregular column
- // column. note that
- // this changes the
- // line we are
- // presently working
- // on: we add
- // additional
- // entries. if we add
- // another entry at a
- // column behind the
- // present one, we
- // will encounter it
- // later on (but
- // since it can't be
- // further
- // constrained, won't
- // have to do
- // anything about
- // it). if we add it
- // up front of the
- // present column, we
- // will find the
- // present column
- // later on again as
- // it was shifted
- // back (again
- // nothing happens,
- // in particular no
- // endless loop, as
- // when we encounter
- // it the second time
- // we won't be able
- // to add more
- // entries as they
- // all already exist,
- // but we do the same
- // work more often
- // than necessary,
- // and the loop gets
- // longer), so move
- // the cursor one to
- // the right in the
- // case that we add
- // an entry up front
- // that did not exist
- // before. check
- // whether it existed
- // before by tracking
- // the length of this
- // row
+ // distribute entry at regular row @p{row} and irregular
+ // column column. note that this changes the line we are
+ // presently working on: we add additional entries. if we
+ // add another entry at a column behind the present one, we
+ // will encounter it later on (but since it can't be
+ // further constrained, won't have to do anything about
+ // it). if we add it up front of the present column, we
+ // will find the present column later on again as it was
+ // shifted back (again nothing happens, in particular no
+ // endless loop, as when we encounter it the second time we
+ // won't be able to add more entries as they all already
+ // exist, but we do the same work more often than
+ // necessary, and the loop gets longer), so move the cursor
+ // one to the right in the case that we add an entry up
+ // front that did not exist before. check whether it
+ // existed before by tracking the length of this row
- unsigned int old_rowlength = sparsity.row_length(row);
- for (unsigned int q=0;
+ size_type old_rowlength = sparsity.row_length(row);
+ for (size_type q=0;
q!=lines[distribute[column]].entries.size();
++q)
{
}
else
// row must be distributed
- for (unsigned int j=0; j<sparsity.row_length(row); ++j)
+ for (size_type j=0; j<sparsity.row_length(row); ++j)
{
- const unsigned int column = sparsity.column_number(row,j);
+ const size_type column = sparsity.column_number(row,j);
- if (distribute[column] == numbers::invalid_unsigned_int)
+ if (distribute[column] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // sparsity.colnums[j]
+ // distribute entry at irregular row @p{row} and regular
+ // column sparsity.colnums[j]
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // sparsity.get_column_numbers()[j]
+ // distribute entry at irregular row @p{row} and irregular
+ // column sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0;
q!=lines[distribute[sparsity.column_number(row,j)]]
.entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- // get index of this row
- // within the blocks
+ // get index of this row within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
+ const size_type block_row = block_index.first;
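
For readers unfamiliar with BlockIndices: global_to_local splits a global row index into (block number, index within the block) and local_to_global inverts it. A self-contained imitation for two blocks of sizes 3 and 5 (plain functions standing in for the BlockIndices interface):

```cpp
#include <cassert>
#include <cstddef>
#include <utility>

// cumulative block start offsets for block sizes {3, 5}
static const std::size_t starts[] = {0, 3, 8};

static std::pair<std::size_t, std::size_t> global_to_local (std::size_t i)
{
  std::size_t block = 0;
  while (starts[block + 1] <= i)
    ++block;
  return std::make_pair (block, i - starts[block]);
}

static std::size_t local_to_global (std::size_t block, std::size_t local)
{
  return starts[block] + local;
}

int main ()
{
  const std::pair<std::size_t, std::size_t> bi = global_to_local (5);
  assert (bi.first == 1 && bi.second == 2); // row 5 = block 1, local row 2
  assert (local_to_global (1, 2) == 5);     // and back again
}
```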
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
- // regular line. loop over
- // all columns and see
- // whether this column must
- // be distributed
+ // regular line. loop over all columns and see whether this column
+ // must be distributed
{
- // to loop over all entries
- // in this row, we have to
- // loop over all blocks in
- // this blockrow and the
- // corresponding row
- // therein
+ // to loop over all entries in this row, we have to loop over all
+ // blocks in this blockrow and the corresponding row therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const SparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
entry->is_valid_entry();
++entry)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col, entry->column());
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
- // distribute entry at regular
- // row @p{row} and irregular column
- // global_col
+ // distribute entry at regular row @p{row} and
+ // irregular column global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (row,
lines[distribute[global_col]].entries[q].first);
}
else
{
- // row must be
- // distributed. split the
- // whole row into the
- // chunks defined by the
- // blocks
+ // row must be distributed. split the whole row into the chunks
+ // defined by the blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const SparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
entry->is_valid_entry();
++entry)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col, entry->column());
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // global_col.
+ // distribute entry at irregular row @p{row} and
+ // regular column global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first, global_col);
}
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // @p{global_col}
+ // distribute entry at irregular row @p{row} and
+ // irregular column @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
}
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
+ distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- // get index of this row
- // within the blocks
+ // get index of this row within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
- const unsigned int local_row = block_index.second;
+ const size_type block_row = block_index.first;
+ const size_type local_row = block_index.second;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
- // regular line. loop over
- // all columns and see
- // whether this column must
- // be distributed. note that
- // as we proceed to
- // distribute cols, the loop
- // over cols may get longer.
+ // regular line. loop over all columns and see whether this column
+ // must be distributed. note that as we proceed to distribute cols,
+ // the loop over cols may get longer.
//
- // don't try to be clever
- // here as in the algorithm
- // for the
- // CompressedSparsityPattern,
- // as that would be much more
- // complicated here. after
- // all, we know that
- // compressed patterns are
- // inefficient...
+ // don't try to be clever here as in the algorithm for the
+ // CompressedSparsityPattern, as that would be much more
+ // complicated here. after all, we know that compressed patterns
+ // are inefficient...
{
- // to loop over all entries
- // in this row, we have to
- // loop over all blocks in
- // this blockrow and the
- // corresponding row
- // therein
+ // to loop over all entries in this row, we have to loop over all
+ // blocks in this blockrow and the corresponding row therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
= index_mapping.local_to_global(block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
- // distribute entry at regular
- // row @p{row} and irregular column
- // global_col
+ // distribute entry at regular row @p{row} and
+ // irregular column global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]]
.entries.size(); ++q)
sparsity.add (row,
}
else
{
- // row must be
- // distributed. split the
- // whole row into the
- // chunks defined by the
- // blocks
+ // row must be distributed. split the whole row into the chunks
+ // defined by the blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
= index_mapping.local_to_global (block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // global_col.
+ // distribute entry at irregular row @p{row} and
+ // regular column global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // @p{global_col}
+ // distribute entry at irregular row @p{row} and
+ // irregular column @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
};
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
+ distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- // get index of this row
- // within the blocks
+ // get index of this row within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
- const unsigned int local_row = block_index.second;
+ const size_type block_row = block_index.first;
+ const size_type local_row = block_index.second;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
- // regular line. loop over
- // all columns and see
- // whether this column must
- // be distributed. note that
- // as we proceed to
- // distribute cols, the loop
- // over cols may get longer.
+ // regular line. loop over all columns and see whether this column
+ // must be distributed. note that as we proceed to distribute cols,
+ // the loop over cols may get longer.
//
- // don't try to be clever
- // here as in the algorithm
- // for the
- // CompressedSparsityPattern,
- // as that would be much more
- // complicated here. after
- // all, we know that
- // compressed patterns are
- // inefficient...
+ // don't try to be clever here as in the algorithm for the
+ // CompressedSparsityPattern, as that would be much more
+ // complicated here. after all, we know that compressed patterns
+ // are inefficient...
{
- // to loop over all entries
- // in this row, we have to
- // loop over all blocks in
- // this blockrow and the
- // corresponding row
- // therein
+ // to loop over all entries in this row, we have to loop over all
+ // blocks in this blockrow and the corresponding row therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSetSparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
j = block_sparsity.row_begin(local_row);
j != block_sparsity.row_end(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global(block_col, *j);
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
- // distribute entry at regular
- // row @p{row} and irregular column
- // global_col
+ // distribute entry at regular row @p{row} and
+ // irregular column global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]]
.entries.size(); ++q)
sparsity.add (row,
}
else
{
- // row must be
- // distributed. split the
- // whole row into the
- // chunks defined by the
- // blocks
+ // row must be distributed. split the whole row into the chunks
+ // defined by the blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSetSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
j = block_sparsity.row_begin(local_row);
j != block_sparsity.row_end(local_row); ++j)
{
- const unsigned int global_col
+ const size_type global_col
= index_mapping.local_to_global (block_col, *j);
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // global_col.
+ // distribute entry at irregular row @p{row} and
+ // regular column global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // @p{global_col}
+ // distribute entry at irregular row @p{row} and
+ // irregular column @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
};
const BlockIndices &
index_mapping = sparsity.get_column_indices();
- const unsigned int n_blocks = sparsity.n_block_rows();
+ const size_type n_blocks = sparsity.n_block_rows();
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
+ // store for each index whether it must be distributed or not. If entry
+ // is numbers::invalid_size_type, no distribution is necessary.
+ // otherwise, the number states which line in the constraint matrix
+ // handles this index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
+ std::vector<size_type> distribute (sparsity.n_rows(),
+ numbers::invalid_size_type);
- for (unsigned int c=0; c<lines.size(); ++c)
+ for (size_type c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
+ distribute[lines[c].line] = c;
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
+ const size_type n_rows = sparsity.n_rows();
+ for (size_type row=0; row<n_rows; ++row)
{
- // get index of this row
- // within the blocks
+ // get index of this row within the blocks
- const std::pair<unsigned int,unsigned int>
+ const std::pair<size_type,size_type>
block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
- const unsigned int local_row = block_index.second;
+ const size_type block_row = block_index.first;
+ const size_type local_row = block_index.second;
- if (distribute[row] == numbers::invalid_unsigned_int)
+ if (distribute[row] == numbers::invalid_size_type)
- // regular line. loop over
- // all columns and see
- // whether this column must
- // be distributed. note that
- // as we proceed to
- // distribute cols, the loop
- // over cols may get longer.
+ // regular line. loop over all columns and see whether this column
+ // must be distributed. note that as we proceed to distribute cols,
+ // the loop over cols may get longer.
//
- // don't try to be clever
- // here as in the algorithm
- // for the
- // CompressedSparsityPattern,
- // as that would be much more
- // complicated here. after
- // all, we know that
- // compressed patterns are
- // inefficient...
+ // don't try to be clever here as in the algorithm for the
+ // CompressedSparsityPattern, as that would be much more
+ // complicated here. after all, we know that compressed patterns
+ // are inefficient...
{
- // to loop over all entries
- // in this row, we have to
- // loop over all blocks in
- // this blockrow and the
- // corresponding row
- // therein
+ // to loop over all entries in this row, we have to loop over all
+ // blocks in this blockrow and the corresponding row therein
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSimpleSparsityPattern &
block_sparsity = sparsity.block(block_row, block_col);
= index_mapping.local_to_global(block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] != numbers::invalid_unsigned_int)
+ if (distribute[global_col] != numbers::invalid_size_type)
- // distribute entry at regular
- // row @p{row} and irregular column
- // global_col
+ // distribute entry at regular row @p{row} and
+ // irregular column global_col
{
- for (unsigned int q=0;
+ for (size_type q=0;
q!=lines[distribute[global_col]]
.entries.size(); ++q)
sparsity.add (row,
}
else
{
- // row must be
- // distributed. split the
- // whole row into the
- // chunks defined by the
- // blocks
+ // row must be distributed. split the whole row into the chunks
+ // defined by the blocks
- for (unsigned int block_col=0; block_col<n_blocks; ++block_col)
+ for (size_type block_col=0; block_col<n_blocks; ++block_col)
{
const CompressedSimpleSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
= index_mapping.local_to_global (block_col,
block_sparsity.column_number(local_row,j));
- if (distribute[global_col] == numbers::invalid_unsigned_int)
+ if (distribute[global_col] == numbers::invalid_size_type)
- // distribute entry at irregular
- // row @p{row} and regular column
- // global_col.
+ // distribute entry at irregular row @p{row} and
+ // regular column global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
else
- // distribute entry at irregular
- // row @p{row} and irregular column
- // @p{global_col}
+ // distribute entry at irregular row @p{row} and
+ // irregular column @p{global_col}
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
lines[distribute[global_col]].entries[q].first);
};
const constraint_iterator end_my_constraints
= Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
- // Here we search all the indices that we
- // need to have read-access to - the
- // local nodes and all the nodes that the
- // constraints indicate.
+ // Here we search all the indices that we need to have read-access to -
+ // the local nodes and all the nodes that the constraints indicate.
IndexSet my_indices (vec.size());
{
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
local_range = vec.local_range();
my_indices.add_range (local_range.first, local_range.second);
for (constraint_iterator it = begin_my_constraints;
it != end_my_constraints; ++it)
{
- // fill entry in line
- // next_constraint.line by adding the
- // different contributions
+ // fill entry in line next_constraint.line by adding the different
+ // contributions
double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
new_value += (vec_distribute(it->entries[i].first) *
it->entries[i].second);
vec(it->line) = new_value;
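
The update in this loop is simply x_c = b_c + sum_i w_i * x_i, with b_c the inhomogeneity and (i, w_i) the constraint entries. The same computation in isolation, with a plain std::vector standing in for the distributed vector:

```cpp
#include <cassert>
#include <cmath>
#include <utility>
#include <vector>

int main ()
{
  std::vector<double> vec = {1.0, 3.0, 0.0}; // x2 is the constrained entry

  // constraint line: x2 = 0.25 + 0.5*x0 + 0.5*x1
  const double inhomogeneity = 0.25;
  const std::vector<std::pair<std::size_t, double>> entries =
    {{0, 0.5}, {1, 0.5}};

  // fill the constrained entry by adding the different contributions
  double new_value = inhomogeneity;
  for (std::size_t i = 0; i < entries.size(); ++i)
    new_value += vec[entries[i].first] * entries[i].second;
  vec[2] = new_value;

  assert (std::fabs (vec[2] - 2.25) == 0.0); // 0.25 + 0.5*1 + 0.5*3
}
```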
const constraint_iterator end_my_constraints
= Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
- // Here we search all the indices that we
- // need to have read-access to - the local
- // nodes and all the nodes that the
- // constraints indicate. No caching done
- // yet. would need some more clever data
- // structures for doing that.
+ // Here we search all the indices that we need to have read-access to
+ // - the local nodes and all the nodes that the constraints indicate.
+ // No caching done yet. would need some more clever data structures
+ // for doing that.
- const std::pair<unsigned int, unsigned int>
+ const std::pair<size_type, size_type>
local_range = vec.block(block).local_range();
my_indices.add_range (local_range.first, local_range.second);
for (constraint_iterator it = begin_my_constraints;
it != end_my_constraints; ++it)
{
- // fill entry in line
- // next_constraint.line by adding the
+ // fill entry in line next_constraint.line by adding the
// different contributions
double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
new_value += (vec_distribute(it->entries[i].first) *
it->entries[i].second);
vec(it->line) = new_value;
for (constraint_iterator it = begin_my_constraints;
it != end_my_constraints; ++it)
{
- // fill entry in line
- // next_constraint.line by adding the
- // different contributions
+ // fill entry in line next_constraint.line by adding the different
+ // contributions
PetscScalar new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
+ for (size_type i=0; i<it->entries.size(); ++i)
new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
it->entries[i].second);
vec(it->line) = new_value;
-unsigned int ConstraintMatrix::max_constraint_indirections () const
+ConstraintMatrix::size_type
+ConstraintMatrix::max_constraint_indirections () const
{
- unsigned int return_value = 0;
+ size_type return_value = 0;
for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
i!=lines.end(); ++i)
- // use static cast, since
- // typeof(size)==std::size_t, which is !=
- // unsigned int on AIX
+ // use static cast, since typeof(size)==std::size_t, which is !=
+ // size_type on AIX
return_value = std::max(return_value,
- static_cast<unsigned int>(i->entries.size()));
+ static_cast<size_type>(i->entries.size()));
return return_value;
}
void ConstraintMatrix::print (std::ostream &out) const
{
- for (unsigned int i=0; i!=lines.size(); ++i)
+ for (size_type i=0; i!=lines.size(); ++i)
{
- // output the list of
- // constraints as pairs of dofs
- // and their weights
+ // output the list of constraints as pairs of dofs and their weights
if (lines[i].entries.size() > 0)
{
- for (unsigned int j=0; j<lines[i].entries.size(); ++j)
+ for (size_type j=0; j<lines[i].entries.size(); ++j)
out << " " << lines[i].line
<< " " << lines[i].entries[j].first
<< ": " << lines[i].entries[j].second << "\n";
{
out << "digraph constraints {"
<< std::endl;
- for (unsigned int i=0; i!=lines.size(); ++i)
+ for (size_type i=0; i!=lines.size(); ++i)
{
- // same concept as in the
- // previous function
+ // same concept as in the previous function
if (lines[i].entries.size() > 0)
- for (unsigned int j=0; j<lines[i].entries.size(); ++j)
+ for (size_type j=0; j<lines[i].entries.size(); ++j)
out << " " << lines[i].line << "->" << lines[i].entries[j].first
<< "; // weight: "
<< lines[i].entries[j].second