#include <dofs/dof_constraints.h>
+
+template <int blocks>
+void ConstraintMatrix::condense (BlockSparsityPattern<blocks,blocks> &sparsity) const
+{
+ Assert (false, ExcInternalError());
+/*
+ Assert (sorted == true, ExcMatrixNotClosed());
+ Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
+ Assert (sparsity.n_rows() == sparsity.n_cols(),
+ ExcMatrixNotSquare());
+ Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
+ ExcMatrixNotSquare());
+
+ // store for each index whether it
+ // must be distributed or not. If the
+ // entry is -1, no distribution is
+ // necessary; otherwise, the value
+ // states which line of the constraint
+ // matrix handles this index
+ vector<int> distribute(sparsity.n_rows(), -1);
+
+ for (unsigned int c=0; c<lines.size(); ++c)
+ distribute[lines[c].line] = static_cast<signed int>(c);
+
+ int n_rows = sparsity.n_rows();
+ for (int row=0; row<n_rows; ++row)
+ {
+ if (distribute[row] == -1)
+ // regular line. loop over cols
+ for (unsigned int j=sparsity.get_rowstart_indices()[row];
+ j<sparsity.get_rowstart_indices()[row+1]; ++j)
+ {
+ const unsigned int column = sparsity.get_column_numbers()[j];
+
+ // end of row reached?
+ if (column == SparsityPattern::invalid_entry)
+ break;
+ else
+ if (distribute[column] != -1)
+ {
+ // distribute entry at regular
+ // row #row# and irregular column
+ // sparsity.colnums[j]
+ for (unsigned int q=0;
+ q!=lines[distribute[column]].entries.size();
+ ++q)
+ sparsity.add (row,
+ lines[distribute[column]].entries[q].first);
+ };
+ }
+ else
+ // row must be distributed
+ for (unsigned int j=sparsity.get_rowstart_indices()[row];
+ j<sparsity.get_rowstart_indices()[row+1]; ++j)
+ // end of row reached?
+ if (sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
+ break;
+ else
+ {
+ if (distribute[sparsity.get_column_numbers()[j]] == -1)
+ // distribute entry at irregular
+ // row #row# and regular column
+ // sparsity.colnums[j]
+ for (unsigned int q=0;
+ q!=lines[distribute[row]].entries.size(); ++q)
+ sparsity.add (lines[distribute[row]].entries[q].first,
+ sparsity.get_column_numbers()[j]);
+ else
+ // distribute entry at irregular
+ // row #row# and irregular column
+ // sparsity.get_column_numbers()[j]
+ for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
+ for (unsigned int q=0;
+ q!=lines[distribute[sparsity.get_column_numbers()[j]]]
+ .entries.size(); ++q)
+ sparsity.add (lines[distribute[row]].entries[p].first,
+ lines[distribute[sparsity.get_column_numbers()[j]]]
+ .entries[q].first);
+ };
+ };
+
+ sparsity.compress();
+*/
+};
+
+
+
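Since the body above is still disabled (the block sparsity pattern does not yet expose the row-start and column-number arrays the code relies on), the following standalone sketch illustrates the same distribution logic using only std:: containers. It is not deal.II code: the names ToyConstraint, ToySparsity and condense_toy_sparsity are invented for illustration, and the sparsity pattern is modelled as one std::set of column indices per row.

    // Standalone sketch of the condensation idea above; all names are
    // hypothetical and do not belong to the library.
    #include <set>
    #include <utility>
    #include <vector>

    struct ToyConstraint
    {
      unsigned int line;                                      // constrained DoF
      std::vector<std::pair<unsigned int, double> > entries;  // (master DoF, weight)
    };

    // rows[i] is the set of column indices coupling with row i.
    typedef std::vector<std::set<unsigned int> > ToySparsity;

    void condense_toy_sparsity (const std::vector<ToyConstraint> &lines,
                                ToySparsity                      &rows)
    {
      // distribute[i] == -1: DoF i is unconstrained; otherwise it is the
      // index of the constraint line handling DoF i.
      std::vector<int> distribute (rows.size(), -1);
      for (unsigned int c=0; c<lines.size(); ++c)
        distribute[lines[c].line] = static_cast<int>(c);

      for (unsigned int row=0; row<rows.size(); ++row)
        {
          // iterate over a copy, since we insert into rows[...] below
          const std::set<unsigned int> old_columns = rows[row];

          for (std::set<unsigned int>::const_iterator col = old_columns.begin();
               col != old_columns.end(); ++col)
            {
              const bool row_constrained = (distribute[row]  != -1);
              const bool col_constrained = (distribute[*col] != -1);

              if (!row_constrained && col_constrained)
                // regular row, constrained column: couple the row with
                // the masters of the column instead
                for (unsigned int q=0; q<lines[distribute[*col]].entries.size(); ++q)
                  rows[row].insert (lines[distribute[*col]].entries[q].first);
              else if (row_constrained && !col_constrained)
                // constrained row, regular column: the entry moves to the
                // rows of the masters of this row
                for (unsigned int q=0; q<lines[distribute[row]].entries.size(); ++q)
                  rows[lines[distribute[row]].entries[q].first].insert (*col);
              else if (row_constrained && col_constrained)
                // both constrained: couple every master of the row with
                // every master of the column
                for (unsigned int p=0; p<lines[distribute[row]].entries.size(); ++p)
                  for (unsigned int q=0; q<lines[distribute[*col]].entries.size(); ++q)
                    rows[lines[distribute[row]].entries[p].first]
                      .insert (lines[distribute[*col]].entries[q].first);
            }
        }
    }

In typical use one would build the constraint lines (e.g. for hanging nodes), call close() so that the lines are sorted, condense the sparsity pattern before compressing it, and only afterwards condense the assembled matrix and right hand side; the commented-out body above follows that same convention, as the assertions on sorted and is_compressed() indicate.
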
template<typename number>
void
ConstraintMatrix::condense (const SparseMatrix<number> &uncondensed,
-template <int blocks>
-void ConstraintMatrix::condense (BlockSparsityPattern<blocks,blocks> &sparsity) const
-{
- Assert (false, ExcInternalError());
-/*
- Assert (sorted == true, ExcMatrixNotClosed());
- Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
- Assert (sparsity.n_rows() == sparsity.n_cols(),
- ExcMatrixNotSquare());
- Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
- ExcMatrixNotSquare());
-
- // store for each index whether it
- // must be distributed or not. If entry
- // is -1, no distribution is necessary.
- // otherwise, the number states which
- // line in the constraint matrix handles
- // this index
- vector<int> distribute(sparsity.n_rows(), -1);
-
- for (unsigned int c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = static_cast<signed int>(c);
-
- int n_rows = sparsity.n_rows();
- for (int row=0; row<n_rows; ++row)
- {
- if (distribute[row] == -1)
- // regular line. loop over cols
- for (unsigned int j=sparsity.get_rowstart_indices()[row];
- j<sparsity.get_rowstart_indices()[row+1]; ++j)
- {
- const unsigned int column = sparsity.get_column_numbers()[j];
-
- // end of row reached?
- if (column == SparsityPattern::invalid_entry)
- break;
- else
- if (distribute[column] != -1)
- {
- // distribute entry at regular
- // row #row# and irregular column
- // sparsity.colnums[j]
- for (unsigned int q=0;
- q!=lines[distribute[column]].entries.size();
- ++q)
- sparsity.add (row,
- lines[distribute[column]].entries[q].first);
- };
- }
- else
- // row must be distributed
- for (unsigned int j=sparsity.get_rowstart_indices()[row];
- j<sparsity.get_rowstart_indices()[row+1]; ++j)
- // end of row reached?
- if (sparsity.get_column_numbers()[j] == SparsityPattern::invalid_entry)
- break;
- else
- {
- if (distribute[sparsity.get_column_numbers()[j]] == -1)
- // distribute entry at irregular
- // row #row# and regular column
- // sparsity.colnums[j]
- for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
- sparsity.add (lines[distribute[row]].entries[q].first,
- sparsity.get_column_numbers()[j]);
- else
- // distribute entry at irregular
- // row #row# and irregular column
- // sparsity.get_column_numbers()[j]
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
- q!=lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries.size(); ++q)
- sparsity.add (lines[distribute[row]].entries[p].first,
- lines[distribute[sparsity.get_column_numbers()[j]]]
- .entries[q].first);
- };
- };
-
- sparsity.compress();
-*/
-};
-
-
-
unsigned int ConstraintMatrix::n_constraints () const
{
return lines.size();