template <int blocks>
void ConstraintMatrix::condense (BlockSparsityPattern<blocks,blocks> &sparsity) const
{
- Assert (false, ExcInternalError());
-/*
Assert (sorted == true, ExcMatrixNotClosed());
Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcMatrixNotSquare());
Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
ExcMatrixNotSquare());
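+ // the index object which translates
+ // between global indices and pairs
+ // of (block number, index within
+ // the block)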
+ const BlockIndices<blocks> &
+ index_mapping = sparsity.get_column_indices();
+
// store for each index whether it must
// be distributed or not. If the entry
// is -1, no distribution is necessary;
// otherwise, the number states which
// line in the constraint matrix handles
// this index
- vector<int> distribute(sparsity.n_rows(), -1);
+ vector<int> distribute (sparsity.n_rows(), -1);
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
- int n_rows = sparsity.n_rows();
- for (int row=0; row<n_rows; ++row)
+ const unsigned int n_rows = sparsity.n_rows();
+ for (unsigned int row=0; row<n_rows; ++row)
{
+ // get index of this row
+ // within the blocks
+ const pair<unsigned int,unsigned int>
+ block_index = index_mapping.global_to_local(row);
+ const unsigned int block_row = block_index.first;
+
if (distribute[row] == -1)
- // regular line. loop over cols
- for (unsigned int j=sparsity.get_rowstart_indices()[row];
- j<sparsity.get_rowstart_indices()[row+1]; ++j)
- {
- const unsigned int column = sparsity.get_column_numbers()[j];
-
- // end of row reached?
- if (column == SparsityPattern::invalid_entry)
- break;
- else
- if (distribute[column] != -1)
- {
- // distribute entry at regular
- // row #row# and irregular column
- // sparsity.colnums[j]
- for (unsigned int q=0;
- q!=lines[distribute[column]].entries.size();
- ++q)
- sparsity.add (row,
- lines[distribute[column]].entries[q].first);
- };
- }
+ // regular line. loop over
+ // all columns and see
+ // whether this column must
+ // be distributed
+ {
+
+ // to loop over all entries
+ // in this row, we have to
+ // loop over all blocks in
+ // this blockrow and the
+ // corresponding row
+ // therein
+ for (unsigned int block_col=0; block_col<blocks; ++block_col)
+ {
+ const SparsityPattern &
+ block_sparsity = sparsity.block(block_row, block_col);
+
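+ // the range [first,last) of entries
+ // in the column number array of this
+ // block which belong to the present
+ // row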
+ const unsigned int
+ first = block_sparsity.get_rowstart_indices()[block_index.second],
+ last = block_sparsity.get_rowstart_indices()[block_index.second+1];
+ for (unsigned int j=first; j<last; ++j)
+ // end of row reached?
+ if (block_sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
+ {
+ // nothing more
+ // to do
+ break;
+ }
+ else
+ {
+ const unsigned int global_col
+ = index_mapping.local_to_global(block_col,
+ block_sparsity.get_column_numbers()[j]);
+
+ if (distribute[global_col] != -1)
+ // distribute entry at regular
+ // row #row# and irregular column
+ // global_col
+ {
+ for (unsigned int q=0;
+ q!=lines[distribute[global_col]]
+ .entries.size(); ++q)
+ sparsity.add (row,
+ lines[distribute[global_col]].entries[q].first);
+ };
+ };
+ };
+ }
else
- // row must be distributed
- for (unsigned int j=sparsity.get_rowstart_indices()[row];
- j<sparsity.get_rowstart_indices()[row+1]; ++j)
- // end of row reached?
- if (sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
- break;
- else
+ {
+ // row must be
+ // distributed. split the
+ // whole row into the
+ // chunks defined by the
+ // blocks
+ for (unsigned int block_col=0; block_col<blocks; ++block_col)
{
- if (distribute[sparsity.get_column_numbers()[j]] == -1)
- // distribute entry at irregular
- // row #row# and regular column
- // sparsity.colnums[j]
- for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
- sparsity.add (lines[distribute[row]].entries[q].first,
- sparsity.get_column_numbers()[j]);
- else
- // distribute entry at irregular
- // row #row# and irregular column
- // sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
- q!=lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries.size(); ++q)
- sparsity.add (lines[distribute[row]].entries[p].first,
- lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries[q].first);
+ const SparsityPattern &
+ block_sparsity = sparsity.block(block_row,block_col);
+
+ const unsigned int
+ first = block_sparsity.get_rowstart_indices()[block_index.second],
+ last = block_sparsity.get_rowstart_indices()[block_index.second+1];
+
+ for (unsigned int j=first; j<last; ++j)
+ // end of row reached?
+ if (block_sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
+ {
+ // nothing more to do
+ break;
+ }
+ else
+ {
+ const unsigned int global_col
+ = index_mapping.local_to_global (block_col,
+ block_sparsity.get_column_numbers()[j]);
+
+ if (distribute[global_col] == -1)
+ // distribute entry at irregular
+ // row #row# and regular column
+ // global_col.
+ {
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ sparsity.add (lines[distribute[row]].entries[q].first,
+ global_col);
+ }
+ else
+ // distribute entry at irregular
+ // row #row# and irregular column
+ // #global_col#
+ {
+ for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ sparsity.add (lines[distribute[row]].entries[p].first,
+ lines[distribute[global_col]].entries[q].first);
+ };
+ };
};
+ };
};
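+ // finally compress the sparsity
+ // pattern now that all couplings
+ // induced by the constraints have
+ // been entered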
sparsity.compress();
-*/
};
vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
unsigned int shift = 0;
- unsigned int n_rows = uncondensed_struct.n_rows();
+ const unsigned int n_rows = uncondensed_struct.n_rows();
if (next_constraint == lines.end())
// if no constraint is to be handled
const unsigned int n_rows = sparsity.n_rows();
for (unsigned int row=0; row<n_rows; ++row)
{
+ // get index of this row
+ // within the blocks
+ const pair<unsigned int,unsigned int>
+ block_index = index_mapping.global_to_local(row);
+ const unsigned int block_row = block_index.first;
+
if (distribute[row] == -1)
+ // regular line. loop over
+ // all columns and see
+ // whether this column must
+ // be distributed
{
- // get index of this row
- // within the blocks
- const pair<unsigned int,unsigned int>
- block_index = index_mapping.global_to_local(row);
-
- // regular line. loop over
- // all columns and see
- // whether this column must
- // be distributed
+
+ // to loop over all entries
+ // in this row, we have to
+ // loop over all blocks in
+ // this blockrow and the
+ // corresponding row
+ // therein
for (unsigned int block_col=0; block_col<blocks; ++block_col)
{
const SparsityPattern &
- block_sparsity = sparsity.block(block_index.first,block_col);
+ block_sparsity = sparsity.block(block_row, block_col);
const unsigned int
first = block_sparsity.get_rowstart_indices()[block_index.second],
last = block_sparsity.get_rowstart_indices()[block_index.second+1];

for (unsigned int j=first; j<last; ++j)
// end of row reached?
if (block_sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
{
// this should not happen, since
// we only operate on compressed
// matrices!
Assert (false, ExcMatrixNotClosed());
break;
}
else
{
const unsigned int global_col
- =index_mapping.local_to_global(block_col,
- block_sparsity.get_column_numbers()[j]);
+ = index_mapping.local_to_global(block_col,
+ block_sparsity.get_column_numbers()[j]);
if (distribute[global_col] != -1)
// distribute entry at regular
// row #row# and irregular column
// global_col. set old
// entry to zero
{
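+ // save the present entry before it
+ // is distributed and set to zero
+ // below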
const double old_value =
- uncondensed.block(block_index.first,block_col).global_entry(j);
+ uncondensed.block(block_row,block_col).global_entry(j);
for (unsigned int q=0;
q!=lines[distribute[global_col]].entries.size(); ++q)
uncondensed.add (row,
lines[distribute[global_col]].entries[q].first,
old_value *
lines[distribute[global_col]].entries[q].second);
- uncondensed.block(block_index.first,block_col).global_entry(j)
+ uncondensed.block(block_row,block_col).global_entry(j)
= 0.;
};
};
}
else
{
- Assert (false, ExcInternalError());
-/*
- // row must be distributed
- for (unsigned int j=sparsity.get_rowstart_indices()[row];
- j<sparsity.get_rowstart_indices()[row+1]; ++j)
- // end of row reached?
- if (sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
- {
- // this should not happen, since
- // we only operate on compressed
- // matrices!
- Assert (false, ExcMatrixNotClosed());
- break;
- }
- else
- {
- if (distribute[sparsity.get_column_numbers()[j]] == -1)
- // distribute entry at irregular
- // row #row# and regular column
- // sparsity.get_column_numbers()[j]. set old
- // entry to zero
+ // row must be
+ // distributed. split the
+ // whole row into the
+ // chunks defined by the
+ // blocks
+ for (unsigned int block_col=0; block_col<blocks; ++block_col)
+ {
+ const SparsityPattern &
+ block_sparsity = sparsity.block(block_row,block_col);
+
+ const unsigned int
+ first = block_sparsity.get_rowstart_indices()[block_index.second],
+ last = block_sparsity.get_rowstart_indices()[block_index.second+1];
+
+ for (unsigned int j=first; j<last; ++j)
+ // end of row reached?
+ if (block_sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
{
- for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
- uncondensed.add (lines[distribute[row]].entries[q].first,
- sparsity.get_column_numbers()[j],
- uncondensed.global_entry(j) *
- lines[distribute[row]].entries[q].second);
-
- uncondensed.global_entry(j) = 0.;
+ // this should not happen, since
+ // we only operate on compressed
+ // matrices!
+ Assert (false, ExcMatrixNotClosed());
+ break;
}
else
- // distribute entry at irregular
- // row #row# and irregular column
- // sparsity.get_column_numbers()[j]
- // set old entry to one if on main
- // diagonal, zero otherwise
{
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
- q!=lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries.size(); ++q)
- uncondensed.add (lines[distribute[row]].entries[p].first,
- lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries[q].first,
- uncondensed.global_entry(j) *
- lines[distribute[row]].entries[p].second *
- lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries[q].second);
+ const unsigned int global_col
+ = index_mapping.local_to_global (block_col,
+ block_sparsity.get_column_numbers()[j]);
+
+ if (distribute[global_col] == -1)
+ // distribute entry at irregular
+ // row #row# and regular column
+ // global_col. set old
+ // entry to zero
+ {
+ const double old_value
+ = uncondensed.block(block_row,block_col).global_entry(j);
+
+ for (unsigned int q=0;
+ q!=lines[distribute[row]].entries.size(); ++q)
+ uncondensed.add (lines[distribute[row]].entries[q].first,
+ global_col,
+ old_value *
+ lines[distribute[row]].entries[q].second);
+
+ uncondensed.block(block_row,block_col).global_entry(j) = 0.;
+ }
+ else
+ // distribute entry at irregular
+ // row #row# and irregular column
+ // #global_col#
+ // set old entry to one if on main
+ // diagonal, zero otherwise
+ {
+ const double old_value
+ = uncondensed.block(block_row,block_col).global_entry(j);
+
+ for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+ uncondensed.add (lines[distribute[row]].entries[p].first,
+ lines[distribute[global_col]].entries[q].first,
+ old_value *
+ lines[distribute[row]].entries[p].second *
+ lines[distribute[global_col]].entries[q].second);
- uncondensed.global_entry(j) = (row == sparsity.get_column_numbers()[j] ?
- 1. : 0. );
+ uncondensed.block(block_row,block_col).global_entry(j)
+ = (row == global_col ? 1. : 0. );
+ };
};
- };
-*/
+ };
};
};
};