/**
* Add a whole series of entries,
- * denoted by pairs of column
- * indices and values, to a line
- * of constraints. This function
- * is equivalent to calling the
- * preceeding function
- * several times, but is faster.
+ * denoted by pairs of column indices
+ * and values, to a line of
+ * constraints. This function is
+ * equivalent to calling the preceding
+ * function several times, but is
+ * faster.
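+ *
+ * As a sketch, the constraint
+ * $u_3=u_1/2+u_2/2$ could be entered
+ * as follows (assuming a constraint
+ * matrix object <tt>constraints</tt>):
+ * @code
+ *   std::vector<std::pair<unsigned int,double> > col_val_pairs;
+ *   col_val_pairs.push_back (std::make_pair (1U, 0.5));
+ *   col_val_pairs.push_back (std::make_pair (2U, 0.5));
+ *
+ *   constraints.add_line (3);
+ *   constraints.add_entries (3, col_val_pairs);
+ * @endcode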
*/
void add_entries (const unsigned int line,
const std::vector<std::pair<unsigned int,double> > &col_val_pairs);
/**
* Set an inhomogeneity to the
- * constraint line <i>i</i>,
- * according to the discussion in
- * the general class description.
+ * constraint line <i>i</i>, according
+ * to the discussion in the general
+ * class description.
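+ *
+ * As a sketch, an inhomogeneous
+ * constraint like $u_3=u_1/2+u_2/2+1$
+ * might be set up as follows (again
+ * assuming a constraint matrix object
+ * <tt>constraints</tt>):
+ * @code
+ *   constraints.add_line (3);
+ *   constraints.add_entry (3, 1, 0.5);
+ *   constraints.add_entry (3, 2, 0.5);
+ *   constraints.set_inhomogeneity (3, 1.0);
+ * @endcode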
*/
void set_inhomogeneity (const unsigned int line,
const double value);
/**
- * Close the filling of
- * entries. Since the lines of a
- * matrix of this type are
- * usually filled in an arbitrary
- * order and since we do not want
- * to use associative constainers
- * to store the lines, we need to
- * sort the lines and within the
- * lines the columns before usage
- * of the matrix. This is done
- * through this function.
+ * Close the filling of entries. Since
+ * the lines of a matrix of this type
+ * are usually filled in an arbitrary
+ * order and since we do not want to
+ * use associative containers to store
+ * the lines, we need to sort the lines
+ * and within the lines the columns
+ * before usage of the matrix. This is
+ * done through this function.
*
- * Also, zero entries are
- * discarded, since they are not
- * needed.
+ * Also, zero entries are discarded,
+ * since they are not needed.
*
- * After closing, no more entries
- * are accepted. If the object
- * was already closed, then this
- * function returns immediately.
+ * After closing, no more entries are
+ * accepted. If the object was already
+ * closed, then this function returns
+ * immediately.
*
- * This function also resolves
- * chains of constraints. For
- * example, degree of freedom 13
- * may be constrained to
- * $u_{13}=u_3/2+u_7/2$ while
- * degree of freedom 7 is itself
- * constrained as
+ * This function also resolves chains
+ * of constraints. For example, degree
+ * of freedom 13 may be constrained to
+ * $u_{13}=u_3/2+u_7/2$ while degree of
+ * freedom 7 is itself constrained as
* $u_7=u_2/2+u_4/2$. Then, the
* resolution will be that
* $u_{13}=u_3/2+u_2/4+u_4/4$. Note,
- * however, that cycles in this
- * graph of constraints are not
- * allowed, i.e. for example
- * $u_4$ may not be constrained,
- * directly or indirectly, to
- * $u_{13}$ again.
+ * however, that cycles in this graph
+ * of constraints are not allowed,
+ * i.e. for example $u_4$ may not be
+ * constrained, directly or indirectly,
+ * to $u_{13}$ again.
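+ *
+ * In code, the example above might
+ * read (a sketch, assuming a
+ * constraint matrix object
+ * <tt>constraints</tt>):
+ * @code
+ *   constraints.add_line (13);
+ *   constraints.add_entry (13, 3, 0.5);
+ *   constraints.add_entry (13, 7, 0.5);
+ *
+ *   constraints.add_line (7);
+ *   constraints.add_entry (7, 2, 0.5);
+ *   constraints.add_entry (7, 4, 0.5);
+ *
+ *   // resolves the chain, so that line 13
+ *   // becomes u_13 = u_3/2 + u_2/4 + u_4/4
+ *   constraints.close ();
+ * @endcode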
*/
void close ();
/**
- * Merge the constraints
- * represented by the object
- * given as argument into the
- * constraints represented by
- * this object. Both objects may
- * or may not be closed (by
- * having their function
- * @p close called before). If
- * this object was closed before,
- * then it will be closed
- * afterwards as well. Note,
- * however, that if the other
- * argument is closed, then
- * merging may be significantly
- * faster.
- *
- * Note that the constraints in
- * each of the two objects (the
- * old one represented by this
- * object and the argument) may
- * not refer to the same degree
- * of freedom, i.e. a degree of
- * freedom that is constrained in
- * one object may not be
- * constrained in the second. If
- * this is nevertheless the case,
- * an exception is thrown.
+ * Merge the constraints represented by
+ * the object given as argument into
+ * the constraints represented by this
+ * object. Both objects may or may not
+ * be closed (by having their function
+ * @p close called before). If this
+ * object was closed before, then it
+ * will be closed afterwards as
+ * well. Note, however, that if the
+ * other argument is closed, then
+ * merging may be significantly faster.
*
- * However, the following is
- * possible: if DoF @p x is
- * constrained to dofs @p x_i
- * for some set of indices @p i,
- * then the DoFs @p x_i may be
- * further constrained by the
- * constraints object given as
- * argument, although not to
- * other DoFs that are
- * constrained in either of the
- * two objects. Note that it is
- * not possible that the DoFs
- * @p x_i are constrained within
- * the present object.
+ * Note that the constraints in each of
+ * the two objects (the old one
+ * represented by this object and the
+ * argument) may not refer to the same
+ * degree of freedom, i.e. a degree of
+ * freedom that is constrained in one
+ * object may not be constrained in the
+ * second. If this is nevertheless the
+ * case, an exception is thrown.
*
- * Because of simplicity of
- * implementation, and also to
- * avoid cycles, this operation
- * is not symmetric: degrees of
- * freedom that are constrained
- * in the given argument object
- * may not be constrained to DoFs
- * that are themselves
+ * However, the following is possible:
+ * if DoF @p x is constrained to dofs
+ * @p x_i for some set of indices @p i,
+ * then the DoFs @p x_i may be further
+ * constrained by the constraints
+ * object given as argument, although
+ * not to other DoFs that are
+ * constrained in either of the two
+ * objects. Note that it is not
+ * possible that the DoFs @p x_i are
* constrained within the present
* object.
*
- * The aim of these merging
- * operations is that if, for
- * example, you have hanging
- * nodes that are constrained to
- * the degrees of freedom
- * adjacent to them, you cannot
- * originally, i.e. within one
- * object, constrain these
- * adjacent nodes
+ * Because of simplicity of
+ * implementation, and also to avoid
+ * cycles, this operation is not
+ * symmetric: degrees of freedom that
+ * are constrained in the given
+ * argument object may not be
+ * constrained to DoFs that are
+ * themselves constrained within the
+ * present object.
+ *
+ * The aim of these merging operations
+ * is that if, for example, you have
+ * hanging nodes that are constrained
+ * to the degrees of freedom adjacent
+ * to them, you cannot originally,
+ * i.e. within one object, constrain
+ * these adjacent nodes
* further. However, that may be
- * desirable in some cases, for
- * example if they belong to a
- * symmetry boundary for which
- * the nodes on one side of the
- * domain should have the same
- * values as those on the other
- * side. In that case, you would
- * first construct a costraints
- * object holding the hanging
- * nodes constraints, and a
- * second one that contains the
- * constraints due to the
- * symmetry boundary. You would
- * then finally merge this second
- * one into the first, possibly
- * eliminating constraints of
- * hanging nodes to adjacent
- * boundary nodes by constraints
- * to nodes at the opposite
+ * desirable in some cases, for example
+ * if they belong to a symmetry
+ * boundary for which the nodes on one
+ * side of the domain should have the
+ * same values as those on the other
+ * side. In that case, you would first
+ * construct a constraints object
+ * holding the hanging nodes
+ * constraints, and a second one that
+ * contains the constraints due to the
+ * symmetry boundary. You would then
+ * finally merge this second one into
+ * the first, possibly eliminating
+ * constraints of hanging nodes to
+ * adjacent boundary nodes by
+ * constraints to nodes at the opposite
* boundary.
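+ *
+ * A sketch of this scenario, where
+ * <tt>dof_handler</tt> is the DoFHandler in
+ * use and <tt>make_symmetry_constraints</tt>
+ * stands for a hypothetical user
+ * function filling the second object,
+ * might read:
+ * @code
+ *   ConstraintMatrix hanging_node_constraints;
+ *   DoFTools::make_hanging_node_constraints (dof_handler,
+ *                                            hanging_node_constraints);
+ *
+ *   ConstraintMatrix symmetry_constraints;
+ *   make_symmetry_constraints (dof_handler, symmetry_constraints);
+ *
+ *   // merge the symmetry constraints into the
+ *   // hanging node constraints and close
+ *   hanging_node_constraints.merge (symmetry_constraints);
+ *   hanging_node_constraints.close ();
+ * @endcode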
*/
void merge (const ConstraintMatrix &other_constraints);
/**
- * Shift all entries of this
- * matrix down @p offset rows
- * and over @p offset columns.
+ * Shift all entries of this matrix
+ * down @p offset rows and over
+ * @p offset columns.
*
- * This function is useful if you
- * are building block matrices,
- * where all blocks are built by
- * the same @p DoFHandler
- * object, i.e. the matrix size
- * is larger than the number of
- * degrees of freedom. Since
- * several matrix rows and
- * columns correspond to the same
- * degrees of freedom, you'd
- * generate several constraint
+ * This function is useful if you are
+ * building block matrices, where all
+ * blocks are built by the same
+ * @p DoFHandler object, i.e. the matrix
+ * size is larger than the number of
+ * degrees of freedom. Since several
+ * matrix rows and columns correspond
+ * to the same degrees of freedom,
+ * you'd generate several constraint
* objects, then shift them, and
- * finally @p merge them
- * together again.
+ * finally @p merge them together
+ * again.
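+ *
+ * A sketch of this procedure for a
+ * matrix with two blocks per row and
+ * column, with <tt>n_dofs</tt> denoting
+ * the number of degrees of freedom of
+ * the @p DoFHandler, might read:
+ * @code
+ *   ConstraintMatrix constraints_0;
+ *   // ... fill constraints for the first block ...
+ *
+ *   ConstraintMatrix constraints_1;
+ *   // ... fill constraints for the second block ...
+ *   constraints_1.shift (n_dofs);
+ *
+ *   constraints_0.merge (constraints_1);
+ *   constraints_0.close ();
+ * @endcode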
*/
void shift (const unsigned int offset);
/**
- * Clear all entries of this matrix. Reset
- * the flag determining whether new entries
- * are accepted or not.
+ * Clear all entries of this
+ * matrix. Reset the flag determining
+ * whether new entries are accepted or
+ * not.
*
* This function may be called also on
* objects which are empty or already
unsigned int n_constraints () const;
/**
- * Return whether the degree of
- * freedom with number @p index is
- * a constrained one.
+ * Return whether the degree of freedom
+ * with number @p index is a
+ * constrained one.
*
- * Note that if @p close was
- * called before, then this
- * function is significantly
- * faster, since then the
- * constrained degrees of freedom
- * are sorted and we can do a
- * binary search, while before
- * @p close was called, we have to
- * perform a linear search
- * through all entries.
+ * Note that if @p close was called
+ * before, then this function is
+ * significantly faster, since then the
+ * constrained degrees of freedom are
+ * sorted and we can do a binary
+ * search, while before @p close was
+ * called, we have to perform a linear
+ * search through all entries.
*/
bool is_constrained (const unsigned int index) const;
/**
* Return whether the dof is
* constrained, and whether it is
- * constrained to only one other
- * degree of freedom with weight
- * one. The function therefore
- * returns whether the degree of
- * freedom would simply be
- * eliminated in favor of exactly
+ * constrained to only one other degree
+ * of freedom with weight one. The
+ * function therefore returns whether
+ * the degree of freedom would simply
+ * be eliminated in favor of exactly
* one other degree of freedom.
*
- * The function returns @p false
- * if either the degree of
- * freedom is not constrained at
- * all, or if it is constrained
- * to more than one other degree
- * of freedom, or if it is
- * constrained to only one degree
- * of freedom but with a weight
- * different from one.
+ * The function returns @p false if
+ * either the degree of freedom is not
+ * constrained at all, or if it is
+ * constrained to more than one other
+ * degree of freedom, or if it is
+ * constrained to only one degree of
+ * freedom but with a weight different
+ * from one.
*/
bool is_identity_constrained (const unsigned int index) const;
/**
- * Return the maximum number of
- * other dofs that one dof is
- * constrained to. For example,
- * in 2d a hanging node is
- * constrained only to its two
- * neighbors, so the returned
- * value would be @p 2. However,
- * for higher order elements
- * and/or higher dimensions, or
- * other types of constraints,
- * this number is no more
+ * Return the maximum number of other
+ * dofs that one dof is constrained
+ * to. For example, in 2d a hanging
+ * node is constrained only to its two
+ * neighbors, so the returned value
+ * would be @p 2. However, for higher
+ * order elements and/or higher
+ * dimensions, or other types of
+ * constraints, this number is no longer
* obvious.
*
- * The name indicates that within
- * the system matrix, references
- * to a constrained node are
- * indirected to the nodes it is
- * constrained to.
+ * The name indicates that within the
+ * system matrix, references to a
+ * constrained node are indirected to
+ * the nodes it is constrained to.
*/
unsigned int max_constraint_indirections () const;
/**
- * Print the constraint lines. Mainly for
- * debugging purposes.
+ * Print the constraint lines. Mainly
+ * for debugging purposes.
*
* This function writes out all entries
* in the constraint matrix lines with
- * their value in the form
- * <tt>row col : value</tt>. Unconstrained lines
- * containing only one identity entry are
- * not stored in this object and are not
- * printed.
+ * their value in the form <tt>row col
+ * : value</tt>. Unconstrained lines
+ * containing only one identity entry
+ * are not stored in this object and
+ * are not printed.
*/
void print (std::ostream &) const;
/**
- * Write the graph of constraints
- * in 'dot' format. 'dot' is a
- * program that can take a list
- * of nodes and produce a
- * graphical representation of
- * the graph of constrained
- * degrees of freedom and the
- * degrees of freedom they are
- * constrained to.
+ * Write the graph of constraints in
+ * 'dot' format. 'dot' is a program
+ * that can take a list of nodes and
+ * produce a graphical representation
+ * of the graph of constrained degrees
+ * of freedom and the degrees of
+ * freedom they are constrained to.
*
- * The output of this function
- * can be used as input to the
- * 'dot' program that can convert
- * the graph into a graphical
- * representation in postscript,
- * png, xfig, and a number of
- * other formats.
+ * The output of this function can be
+ * used as input to the 'dot' program
+ * that can convert the graph into a
+ * graphical representation in
+ * postscript, png, xfig, and a number
+ * of other formats.
*
- * This function exists mostly
- * for debugging purposes.
+ * This function exists mostly for
+ * debugging purposes.
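+ *
+ * A typical use might look like this
+ * (a sketch):
+ * @code
+ *   std::ofstream out ("constraints.dot");
+ *   constraints.write_dot (out);
+ * @endcode
+ * The resulting file can then be
+ * converted with, for example,
+ * <tt>dot -Tps constraints.dot -o constraints.ps</tt>.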
*/
void write_dot (std::ostream &) const;
/**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object.
*/
unsigned int memory_consumption () const;
/**
* Condense a given sparsity
- * pattern. This function assumes
- * the uncondensed matrix struct
- * to be compressed and the one
- * to be filled to be empty. The
- * condensed structure is
- * compressed afterwards.
+ * pattern. This function assumes the
+ * uncondensed matrix struct to be
+ * compressed and the one to be filled
+ * to be empty. The condensed structure
+ * is compressed afterwards.
*
- * The constraint matrix object
- * must be closed to call this
- * function.
+ * The constraint matrix object must be
+ * closed to call this function.
*
* @note The hanging nodes are
* completely eliminated from the
* linear system referring to
- * <tt>condensed</tt>. Therefore,
+ * <tt>condensed</tt>. Therefore, the
+ * dimension of <tt>condensed</tt> is
* the dimension of
- * <tt>condensed</tt> is the
- * dimension of
* <tt>uncondensed</tt> minus the
- * number of constrained degrees
- * of freedom.
+ * number of constrained degrees of
+ * freedom.
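+ *
+ * A usage sketch, where
+ * <tt>uncondensed_sparsity</tt> is the
+ * compressed, uncondensed pattern and
+ * <tt>constraints</tt> is a closed
+ * constraint matrix object:
+ * @code
+ *   SparsityPattern condensed_sparsity;
+ *   constraints.condense (uncondensed_sparsity,
+ *                         condensed_sparsity);
+ * @endcode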
*/
void condense (const SparsityPattern &uncondensed,
SparsityPattern &condensed) const;
/**
- * This function does much the
- * same as the above one, except
- * that it condenses the matrix
- * struct 'in-place'. It does not
- * remove nonzero entries from
- * the matrix but adds those
- * needed for the process of
- * distribution of the
- * constrained degrees of
- * freedom.
+ * This function does much the same as
+ * the above one, except that it
+ * condenses the matrix struct
+ * 'in-place'. It does not remove
+ * nonzero entries from the matrix but
+ * adds those needed for the process of
+ * distribution of the constrained
+ * degrees of freedom.
*
- * Since this function adds new
- * nonzero entries to the
- * sparsity pattern, the argument
- * must not be
- * compressed. However the
- * constraint matrix must be
- * closed. The matrix struct is
- * compressed at the end of the
- * function.
+ * Since this function adds new nonzero
+ * entries to the sparsity pattern, the
+ * argument must not be
+ * compressed. However, the constraint
+ * matrix must be closed. The matrix
+ * struct is compressed at the end of
+ * the function.
*/
void condense (SparsityPattern &sparsity) const;
/**
* Same function as above, but
- * condenses square compressed
- * sparsity patterns.
+ * condenses square compressed sparsity
+ * patterns.
*
* Given the data structure used by
* CompressedSparsityPattern, this
/**
* Same function as above, but
- * condenses compressed
- * sparsity patterns, which are
- * based on the std::set container.
+ * condenses compressed sparsity
+ * patterns, which are based on the
+ * std::set container.
*/
void condense (CompressedSetSparsityPattern &sparsity) const;
/**
* Same function as above, but
- * condenses compressed
- * sparsity patterns, which are
- * based on the ''simple'' aproach.
+ * condenses compressed sparsity
+ * patterns, which are based on the
+ * "simple" approach.
*/
void condense (CompressedSimpleSparsityPattern &sparsity) const;
/**
* Same function as above, but
- * condenses square compressed
- * sparsity patterns.
+ * condenses square compressed sparsity
+ * patterns.
*
- * Given the data structure used
- * by BlockCompressedSparsityPattern,
- * this function becomes
- * quadratic in the number of
- * degrees of freedom for large
- * problems and can dominate
+ * Given the data structure used by
+ * BlockCompressedSparsityPattern, this
+ * function becomes quadratic in the
+ * number of degrees of freedom for
+ * large problems and can dominate
* setting up linear systems when
- * several hundred thousand or
- * millions of unknowns are
- * involved and for problems with
- * many nonzero elements per row
- * (for example for vector-valued
- * problems or hp finite
+ * several hundred thousand or millions
+ * of unknowns are involved and for
+ * problems with many nonzero elements
+ * per row (for example for
+ * vector-valued problems or hp finite
* elements). In this case, it is
* advisable to use the
* BlockCompressedSetSparsityPattern
- * class instead, see for example
- * @ref step_27 "step-27" and
- * @ref step_31 "step-31".
+ * class instead, see for example
+ * @ref step_27 "step-27" and
+ * @ref step_31 "step-31".
*/
void condense (BlockCompressedSparsityPattern &sparsity) const;
/**
* Same function as above, but
- * condenses square compressed
- * sparsity patterns.
+ * condenses square compressed sparsity
+ * patterns.
*/
void condense (BlockCompressedSetSparsityPattern &sparsity) const;
/**
* Same function as above, but
- * condenses square compressed
- * sparsity patterns.
+ * condenses square compressed sparsity
+ * patterns.
*/
void condense (BlockCompressedSimpleSparsityPattern &sparsity) const;
/**
* This function does much the same as
- * the above one, except that it condenses
- * the matrix 'in-place'. See the general
- * documentation of this class for more
- * detailed information.
+ * the above one, except that it
+ * condenses the matrix 'in-place'. See
+ * the general documentation of this
+ * class for more detailed information.
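+ *
+ * A typical call after assembly, with
+ * <tt>system_matrix</tt> the assembled
+ * matrix and <tt>constraints</tt> a closed
+ * constraint matrix object, might
+ * simply read:
+ * @code
+ *   constraints.condense (system_matrix);
+ * @endcode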
*/
template<typename number>
void condense (SparseMatrix<number> &matrix) const;
* guarantee that all entries of @p
* condensed be zero. Note that this
* function does not take any
- * inhomogeneity into account, use the
- * function using both a matrix and
+ * inhomogeneity into account and
+ * throws an exception in case there
+ * are any inhomogeneities. Use
+ * the function that takes both a matrix and
* vector for that case.
*
* The @p VectorType may be a
* Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a
- * PETSc or Trilinos vector
- * wrapper class, or any other
- * type having the same
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
* interface.
*/
template <class VectorType>
/**
* Condense the given vector
- * in-place. The @p VectorType
- * may be a Vector<float>,
- * Vector<double>,
- * BlockVector<tt><...></tt>, a
- * PETSc or Trilinos vector
- * wrapper class, or any other
- * type having the same
- * interface. Note that this
- * function does not take any
- * inhomogeneity into account, use the
- * function using both a matrix and
- * vector for that case.
+ * in-place. The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface. Note that this function
+ * does not take any inhomogeneity into
+ * account and throws an exception in
+ * case there are any
+ * inhomogeneities. Use the function
+ * that takes both a matrix and vector for
+ * that case.
*/
template <class VectorType>
void condense (VectorType &vec) const;
* responsibility to guarantee that all
* entries in the @p condensed matrix
* and vector be zero! This function is
- * capable of applying inhomogeneous
- * constraints.
+ * the appropriate choice for applying
+ * inhomogeneous constraints.
*
* The constraint matrix object must be
* closed to call this function.
ConstraintMatrix::condense (const SparseMatrix<number> &uncondensed,
SparseMatrix<number> &condensed) const
{
- const SparsityPattern &uncondensed_struct = uncondensed.get_sparsity_pattern ();
-
- Assert (sorted == true, ExcMatrixNotClosed());
- Assert (uncondensed_struct.is_compressed() == true, ExcMatrixNotClosed());
- Assert (condensed.get_sparsity_pattern().is_compressed() == true, ExcMatrixNotClosed());
- Assert (uncondensed_struct.n_rows() == uncondensed_struct.n_cols(),
- ExcNotQuadratic());
- Assert (condensed.n() == condensed.m(),
- ExcNotQuadratic());
- Assert (condensed.n()+n_constraints() == uncondensed.n(),
- ExcDimensionMismatch(condensed.n()+n_constraints(), uncondensed.n()));
-
- // store for each line of the matrix
- // its new line number
- // after compression. If the shift is
- // -1, this line will be condensed away
- std::vector<int> new_line;
-
- new_line.reserve (uncondensed_struct.n_rows());
-
- std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- unsigned int shift = 0;
- const unsigned int n_rows = uncondensed_struct.n_rows();
-
- if (next_constraint == lines.end())
- // if no constraint is to be handled
- for (unsigned int row=0; row!=n_rows; ++row)
- new_line.push_back (row);
- else
- for (unsigned int row=0; row!=n_rows; ++row)
- if (row == next_constraint->line)
- {
- // this line is constrained
- new_line.push_back (-1);
- // note that @p lines is ordered
- ++shift;
- ++next_constraint;
- if (next_constraint == lines.end())
- // nothing more to do; finish rest
- // of loop
- {
- for (unsigned int i=row+1; i<n_rows; ++i)
- new_line.push_back (i-shift);
- break;
- };
- }
- else
- new_line.push_back (row-shift);
-
-
- next_constraint = lines.begin();
- // note: in this loop we need not check
- // whether @p next_constraint is a valid
- // iterator, since @p next_constraint is
- // only evaluated so often as there are
- // entries in new_line[*] which tells us
- // which constraints exist
- for (unsigned int row=0; row<uncondensed_struct.n_rows(); ++row)
- if (new_line[row] != -1)
- // line not constrained
- // copy entries if column will not
- // be condensed away, distribute
- // otherwise
- for (unsigned int j=uncondensed_struct.get_rowstart_indices()[row];
- j<uncondensed_struct.get_rowstart_indices()[row+1]; ++j)
- if (new_line[uncondensed_struct.get_column_numbers()[j]] != -1)
- condensed.add (new_line[row], new_line[uncondensed_struct.get_column_numbers()[j]],
- uncondensed.global_entry(j));
- else
- {
- // let c point to the
- // constraint of this column
- std::vector<ConstraintLine>::const_iterator c = lines.begin();
- while (c->line != uncondensed_struct.get_column_numbers()[j])
- ++c;
-
- for (unsigned int q=0; q!=c->entries.size(); ++q)
- // distribute to rows with
- // appropriate weight
- condensed.add (new_line[row], new_line[c->entries[q].first],
- uncondensed.global_entry(j) * c->entries[q].second);
- }
- else
- // line must be distributed
- {
- for (unsigned int j=uncondensed_struct.get_rowstart_indices()[row];
- j<uncondensed_struct.get_rowstart_indices()[row+1]; ++j)
- // for each column: distribute
- if (new_line[uncondensed_struct.get_column_numbers()[j]] != -1)
- // column is not constrained
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
- condensed.add (new_line[next_constraint->entries[q].first],
- new_line[uncondensed_struct.get_column_numbers()[j]],
- uncondensed.global_entry(j) *
- next_constraint->entries[q].second);
-
- else
- // not only this line but
- // also this col is constrained
- {
- // let c point to the constraint
- // of this column
- std::vector<ConstraintLine>::const_iterator c = lines.begin();
- while (c->line != uncondensed_struct.get_column_numbers()[j])
- ++c;
-
- for (unsigned int p=0; p!=c->entries.size(); ++p)
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
- condensed.add (new_line[next_constraint->entries[q].first],
- new_line[c->entries[p].first],
- uncondensed.global_entry(j) *
- next_constraint->entries[q].second *
- c->entries[p].second);
- };
-
- ++next_constraint;
- };
+ // create two dummy vectors and enter the
+ // other function
+ Vector<number> in (0), out(0);
+ condense (uncondensed, in, condensed, out);
}
void
ConstraintMatrix::condense (SparseMatrix<number> &uncondensed) const
{
- const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern ();
-
- Assert (sorted == true, ExcMatrixNotClosed());
- Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
- Assert (sparsity.n_rows() == sparsity.n_cols(),
- ExcNotQuadratic());
-
- double average_diagonal = 0;
- for (unsigned int i=0; i<uncondensed.m(); ++i)
- average_diagonal += std::fabs (uncondensed.diag_element(i));
- average_diagonal /= uncondensed.m();
-
- // store for each index whether it must be
- // distributed or not. If entry is
- // invalid_unsigned_int, no distribution is
- // necessary. otherwise, the number states
- // which line in the constraint matrix
- // handles this index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
-
- for (unsigned int c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = c;
-
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
- {
- if (distribute[row] == numbers::invalid_unsigned_int)
- // regular line. loop over cols
- {
- for (typename SparseMatrix<number>::iterator
- entry = uncondensed.begin(row);
- entry != uncondensed.end(row); ++entry)
- {
- const unsigned int column = entry->column();
-
- // end of row reached?
- // this should not
- // happen, since we only
- // operate on compressed
- // matrices!
- Assert (column != SparsityPattern::invalid_entry,
- ExcMatrixNotClosed());
-
- if (distribute[column] != numbers::invalid_unsigned_int)
- // distribute entry at
- // regular row @p row
- // and irregular column
- // sparsity.get_column_numbers()[j];
- // set old entry to
- // zero
- {
- for (unsigned int q=0;
- q!=lines[distribute[column]].entries.size(); ++q)
- uncondensed.add (row,
- lines[distribute[column]].entries[q].first,
- entry->value() *
- lines[distribute[column]].entries[q].second);
-
- // set old value to zero
- entry->value() = 0.;
- }
- }
- }
- else
- // row must be distributed
- {
- for (typename SparseMatrix<number>::iterator
- entry = uncondensed.begin(row);
- entry != uncondensed.end(row); ++entry)
- {
- const unsigned int column = entry->column();
-
- // end of row reached?
- // this should not
- // happen, since we only
- // operate on compressed
- // matrices!
- Assert (column != SparsityPattern::invalid_entry,
- ExcMatrixNotClosed());
-
- if (distribute[column] == numbers::invalid_unsigned_int)
- // distribute entry at
- // irregular row
- // @p row and regular
- // column
- // column. set
- // old entry to zero
- {
- for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
- uncondensed.add (lines[distribute[row]].entries[q].first,
- column,
- entry->value() *
- lines[distribute[row]].entries[q].second);
-
- // set old entry to zero
- entry->value() = 0.;
- }
- else
- // distribute entry at
- // irregular row @p row and
- // irregular column
- // @p column set old entry
- // to one on main
- // diagonal, zero otherwise
- {
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0;
- q!=lines[distribute[column]].entries.size(); ++q)
- uncondensed.add (lines[distribute[row]].entries[p].first,
- lines[distribute[column]].entries[q].first,
- entry->value() *
- lines[distribute[row]].entries[p].second *
- lines[distribute[column]].entries[q].second);
-
- // set old entry to correct
- // value
- entry->value() = (row == column ? average_diagonal : 0. );
- }
- }
- }
- }
+ Vector<number> dummy (0);
+ condense (uncondensed, dummy);
}
-template <typename number>
-void
-ConstraintMatrix::condense (BlockSparseMatrix<number> &uncondensed) const
-{
- const unsigned int blocks = uncondensed.n_block_rows();
-
- const BlockSparsityPattern &
- sparsity = uncondensed.get_sparsity_pattern ();
-
- Assert (sorted == true, ExcMatrixNotClosed());
- Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
- Assert (sparsity.n_rows() == sparsity.n_cols(),
- ExcNotQuadratic());
- Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
- ExcNotQuadratic());
- Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
- ExcNotQuadratic());
- Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
- ExcNotQuadratic());
-
- double average_diagonal = 0;
- for (unsigned int b=0; b<uncondensed.n_block_rows(); ++b)
- for (unsigned int i=0; i<uncondensed.block(b,b).m(); ++i)
- average_diagonal += std::fabs (uncondensed.block(b,b).diag_element(i));
- average_diagonal /= uncondensed.m();
-
- const BlockIndices &
- index_mapping = sparsity.get_column_indices();
-
- // store for each index whether it must be
- // distributed or not. If entry is
- // numbers::invalid_unsigned_int,
- // no distribution is necessary.
- // otherwise, the number states which line
- // in the constraint matrix handles this
- // index
- std::vector<unsigned int> distribute (sparsity.n_rows(),
- numbers::invalid_unsigned_int);
-
- for (unsigned int c=0; c<lines.size(); ++c)
- distribute[lines[c].line] = c;
-
- const unsigned int n_rows = sparsity.n_rows();
- for (unsigned int row=0; row<n_rows; ++row)
- {
- // get index of this row
- // within the blocks
- const std::pair<unsigned int,unsigned int>
- block_index = index_mapping.global_to_local(row);
- const unsigned int block_row = block_index.first;
-
- if (distribute[row] == numbers::invalid_unsigned_int)
- // regular line. loop over
- // all columns and see
- // whether this column must
- // be distributed
- {
-
- // to loop over all entries
- // in this row, we have to
- // loop over all blocks in
- // this blockrow and the
- // corresponding row
- // therein
- for (unsigned int block_col=0; block_col<blocks; ++block_col)
- {
- for (typename SparseMatrix<number>::iterator
- entry = uncondensed.block(block_row, block_col).begin(block_index.second);
- entry != uncondensed.block(block_row, block_col).end(block_index.second);
- ++entry)
- {
- const unsigned int global_col
- = index_mapping.local_to_global(block_col,entry->column());
-
- if (distribute[global_col] != numbers::invalid_unsigned_int)
- // distribute entry at
- // regular row @p row
- // and irregular column
- // global_col; set old
- // entry to zero
- {
- const double old_value = entry->value ();
-
- for (unsigned int q=0;
- q!=lines[distribute[global_col]].entries.size(); ++q)
- uncondensed.add (row,
- lines[distribute[global_col]].entries[q].first,
- old_value *
- lines[distribute[global_col]].entries[q].second);
-
- entry->value() = 0.;
- }
- }
- }
- }
- else
- {
- // row must be
- // distributed. split the
- // whole row into the
- // chunks defined by the
- // blocks
- for (unsigned int block_col=0; block_col<blocks; ++block_col)
- {
- for (typename SparseMatrix<number>::iterator
- entry = uncondensed.block(block_row, block_col).begin(block_index.second);
- entry != uncondensed.block(block_row, block_col).end(block_index.second);
- ++entry)
- {
- const unsigned int global_col
- = index_mapping.local_to_global (block_col, entry->column());
-
- if (distribute[global_col] ==
- numbers::invalid_unsigned_int)
- // distribute
- // entry at
- // irregular
- // row @p row
- // and regular
- // column
- // global_col. set
- // old entry to
- // zero
- {
- const double old_value = entry->value();
-
- for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
- uncondensed.add (lines[distribute[row]].entries[q].first,
- global_col,
- old_value *
- lines[distribute[row]].entries[q].second);
-
- entry->value() = 0.;
- }
- else
- // distribute entry at
- // irregular row @p row
- // and irregular column
- // @p global_col set old
- // entry to one if on
- // main diagonal, zero
- // otherwise
- {
- const double old_value = entry->value ();
-
- for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
- for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
- uncondensed.add (lines[distribute[row]].entries[p].first,
- lines[distribute[global_col]].entries[q].first,
- old_value *
- lines[distribute[row]].entries[p].second *
- lines[distribute[global_col]].entries[q].second);
-
- entry->value() = (row == global_col ? average_diagonal : 0. );
- }
- }
- }
- }
- }
+template <typename number>
+void
+ConstraintMatrix::condense (BlockSparseMatrix<number> &uncondensed) const
+{
+ BlockVector<number> dummy (0);
+ condense (uncondensed, dummy);
}
SparseMatrix<number> &condensed,
VectorType &condensed_vector) const
{
+ // check whether we work on real vectors
+ // or we just used a dummy when calling
+ // the other function above.
+ const bool use_vectors = (uncondensed_vector.size() == 0 &&
+ condensed_vector.size() == 0) ? false : true;
+
const SparsityPattern &uncondensed_struct = uncondensed.get_sparsity_pattern ();
Assert (sorted == true, ExcMatrixNotClosed());
ExcNotQuadratic());
Assert (condensed.n()+n_constraints() == uncondensed.n(),
ExcDimensionMismatch(condensed.n()+n_constraints(), uncondensed.n()));
- Assert (condensed_vector.size()+n_constraints() == uncondensed_vector.size(),
- ExcDimensionMismatch(condensed_vector.size()+n_constraints(),
- uncondensed_vector.size()));
- Assert (condensed_vector.size() == condensed.m(),
- ExcDimensionMismatch(condensed_vector.size(), condensed.m()));
+ if (use_vectors == true)
+ {
+ Assert (condensed_vector.size()+n_constraints() == uncondensed_vector.size(),
+ ExcDimensionMismatch(condensed_vector.size()+n_constraints(),
+ uncondensed_vector.size()));
+ Assert (condensed_vector.size() == condensed.m(),
+ ExcDimensionMismatch(condensed_vector.size(), condensed.m()));
+ }
// store for each line of the matrix
// its new line number
// explicit elimination in the respective
// row of the inhomogeneous constraint in
// the matrix with Gauss elimination
- condensed_vector(new_line[row]) -= uncondensed.global_entry(j) /
- uncondensed.diag_element(row) * c->inhomogeneity;
+ if (use_vectors == true)
+ condensed_vector(new_line[row]) -= uncondensed.global_entry(j) *
+ c->inhomogeneity;
}
- condensed_vector(new_line[row]) += uncondensed_vector(row);
+ if (use_vectors == true)
+ condensed_vector(new_line[row]) += uncondensed_vector(row);
}
else
// line must be distributed
c->entries[p].second);
};
- // distribute vector
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
- condensed_vector(new_line[next_constraint->entries[q].first])
- +=
- uncondensed_vector(row) * next_constraint->entries[q].second;
+ // condense the vector
+ if (use_vectors == true)
+ for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ condensed_vector(new_line[next_constraint->entries[q].first])
+ +=
+ uncondensed_vector(row) * next_constraint->entries[q].second;
++next_constraint;
};
ConstraintMatrix::condense (SparseMatrix<number> &uncondensed,
VectorType &vec) const
{
+ // check whether we work on real vectors
+ // or we just used a dummy when calling
+ // the other function above.
+ const bool use_vectors = vec.size() == 0 ? false : true;
+
const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern ();
Assert (sorted == true, ExcMatrixNotClosed());
Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
- Assert (vec.size() == sparsity.n_rows(),
- ExcDimensionMismatch(vec.size(), sparsity.n_rows()));
+ if (use_vectors == true)
+ {
+ Assert (vec.size() == sparsity.n_rows(),
+ ExcDimensionMismatch(vec.size(), sparsity.n_rows()));
+ }
double average_diagonal = 0;
for (unsigned int i=0; i<uncondensed.m(); ++i)
// explicit elimination in the respective
// row of the inhomogeneous constraint in
// the matrix with Gauss elimination
- vec(column) -= entry->value() *
- lines[distribute[column]].inhomogeneity;
+ if (use_vectors == true)
+ vec(column) -= entry->value() *
+ lines[distribute[column]].inhomogeneity;
// set old value to zero
entry->value() = 0.;
}
// take care of vector
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
- vec(lines[distribute[row]].entries[q].first)
- += (vec(row) * lines[distribute[row]].entries[q].second);
+ if (use_vectors == true)
+ {
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ vec(lines[distribute[row]].entries[q].first)
+ += (vec(row) * lines[distribute[row]].entries[q].second);
- vec(lines[distribute[row]].line) = 0.;
+ vec(lines[distribute[row]].line) = 0.;
+ }
}
}
}
ConstraintMatrix::condense (BlockSparseMatrix<number> &uncondensed,
BlockVectorType &vec) const
{
+ // check whether we work on real vectors
+ // or we just used a dummy when calling
+ // the other function above.
+ const bool use_vectors = vec.n_blocks() == 0 ? false : true;
+
const unsigned int blocks = uncondensed.n_block_rows();
const BlockSparsityPattern &
ExcNotQuadratic());
Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
ExcNotQuadratic());
- Assert (vec.size() == sparsity.n_rows(),
- ExcDimensionMismatch(vec.size(), sparsity.n_rows()));
- Assert (vec.n_blocks() == sparsity.n_block_rows(),
- ExcDimensionMismatch(vec.n_blocks(), sparsity.n_block_rows()));
+
+ if (use_vectors == true)
+ {
+ Assert (vec.size() == sparsity.n_rows(),
+ ExcDimensionMismatch(vec.size(), sparsity.n_rows()));
+ Assert (vec.n_blocks() == sparsity.n_block_rows(),
+ ExcDimensionMismatch(vec.n_blocks(), sparsity.n_block_rows()));
+ }
double average_diagonal = 0;
for (unsigned int b=0; b<uncondensed.n_block_rows(); ++b)
// explicit elimination in the respective
// row of the inhomogeneous constraint in
// the matrix with Gauss elimination
- vec(global_col) -= entry->value() *
- lines[distribute[global_col]].inhomogeneity;
+ if (use_vectors == true)
+ vec(global_col) -= entry->value() *
+ lines[distribute[global_col]].inhomogeneity;
entry->value() = 0.;
}
}
// take care of vector
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
- vec(lines[distribute[row]].entries[q].first)
- += (vec(row) * lines[distribute[row]].entries[q].second);
+ if (use_vectors == true)
+ {
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ vec(lines[distribute[row]].entries[q].first)
+ += (vec(row) * lines[distribute[row]].entries[q].second);
- vec(lines[distribute[row]].line) = 0.;
+ vec(lines[distribute[row]].line) = 0.;
+ }
}
}
}
const std::vector<unsigned int> &local_dof_indices,
MatrixType &global_matrix) const
{
- Assert (local_matrix.n() == local_dof_indices.size(),
- ExcDimensionMismatch(local_matrix.n(), local_dof_indices.size()));
- Assert (local_matrix.m() == local_dof_indices.size(),
- ExcDimensionMismatch(local_matrix.m(), local_dof_indices.size()));
- Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
- Assert (sorted == true, ExcMatrixNotClosed());
-
- const unsigned int n_local_dofs = local_dof_indices.size();
-
- // A lock that allows only one thread at
- // time to go on in this function.
- Threads::ThreadMutex::ScopedLock lock(mutex);
-
- // have a special case where there are no
- // constraints at all, since then we can be
- // a lot faster
- if (lines.size() == 0)
- global_matrix.add(local_dof_indices, local_matrix);
- else
- {
- // here we have to do something a
- // little nastier than in the
- // respective function for
- // vectors. the reason is that we
- // have two nested loops and we don't
- // want to repeatedly check whether a
- // certain dof is constrained or not
- // by searching over all the
- // constrained dofs. so we have to
- // cache this knowledge, by storing
- // for each dof index whether and
- // where the line of the constraint
- // matrix is located. Moreover, we
- // store how many entries there are
- // at most in one constrained row in
- // order to set the scratch array for
- // column data to a sufficient size.
- std::vector<const ConstraintLine *>
- constraint_lines (n_local_dofs,
- static_cast<const ConstraintLine *>(0));
- unsigned int n_max_entries_per_row = 0;
- for (unsigned int i=0; i<n_local_dofs; ++i)
- {
- ConstraintLine index_comparison;
- index_comparison.line = local_dof_indices[i];
-
- const std::vector<ConstraintLine>::const_iterator
- position = std::lower_bound (lines.begin(),
- lines.end(),
- index_comparison);
-
- // if this dof is constrained,
- // then set the respective entry
- // in the array. otherwise leave
- // it at the invalid position
- if ((position != lines.end()) &&
- (position->line == local_dof_indices[i]))
- {
- constraint_lines[i] = &*position;
- n_max_entries_per_row += position->entries.size();
- }
- }
-
- // We need to add the number of
- // entries in the local matrix in
- // order to obtain a sufficient size
- // for the scratch array.
- n_max_entries_per_row += n_local_dofs;
- if (column_indices.size() < n_max_entries_per_row)
- {
- column_indices.resize(n_max_entries_per_row);
- column_values.resize(n_max_entries_per_row);
- }
-
- // now distribute entries row by row
- for (unsigned int i=0; i<n_local_dofs; ++i)
- {
- const ConstraintLine *position_i = constraint_lines[i];
- const bool is_constrained_i = (position_i != 0);
-
- unsigned int col_counter = 0;
-
- for (unsigned int j=0; j<n_local_dofs; ++j)
- {
- // we don't need to proceed when the
- // matrix element is zero
- if (local_matrix(i,j) == 0)
- continue;
-
- const ConstraintLine *position_j = constraint_lines[j];
- const bool is_constrained_j = (position_j != 0);
-
- if ((is_constrained_i == false) &&
- (is_constrained_j == false))
- {
- // neither row nor column
- // is constrained, so
- // write the value into
- // the scratch array
- column_indices[col_counter] = local_dof_indices[j];
- column_values[col_counter] = local_matrix(i,j);
- col_counter++;
- }
- else if ((is_constrained_i == true) &&
- (is_constrained_j == false))
- {
- // ok, row is
- // constrained, but
- // column is not. This
- // creates entries in
- // several rows to the
- // same column, which is
- // not covered by the
- // scratch array. Write
- // the values directly
- // into the matrix
- for (unsigned int q=0; q<position_i->entries.size(); ++q)
- global_matrix.add (position_i->entries[q].first,
- local_dof_indices[j],
- local_matrix(i,j) *
- position_i->entries[q].second);
- }
- else if ((is_constrained_i == false) &&
- (is_constrained_j == true))
- {
- // simply the other way
- // round: row ok, column
- // is constrained. This
- // time, we can put
- // everything into the
- // scratch array, since
- // we are in the correct
- // row.
- for (unsigned int q=0; q<position_j->entries.size(); ++q)
- {
- column_indices[col_counter] = position_j->entries[q].first;
- column_values[col_counter] = local_matrix(i,j) *
- position_j->entries[q].second;
- col_counter++;
- }
- }
- else if ((is_constrained_i == true) &&
- (is_constrained_j == true))
- {
- // last case: both row
- // and column are
- // constrained. Again,
- // this creates entries
- // in other rows than the
- // current one, so write
- // the values again in
- // the matrix directly
- for (unsigned int p=0; p<position_i->entries.size(); ++p)
- for (unsigned int q=0; q<position_j->entries.size(); ++q)
- global_matrix.add (position_i->entries[p].first,
- position_j->entries[q].first,
- local_matrix(i,j) *
- position_i->entries[p].second *
- position_j->entries[q].second);
-
- // to make sure that the
- // global matrix remains
- // invertible, we need to
- // do something with the
- // diagonal elements. add
- // the absolute value of
- // the local matrix, so
- // the resulting entry
- // will always be
- // positive and
- // furthermore be in the
- // same order of
- // magnitude as the other
- // elements of the matrix
- //
- // note that this also
- // captures the special
- // case that a dof is
- // both constrained and
- // fixed (this can happen
- // for hanging nodes in
- // 3d that also happen to
- // be on the
- // boundary). in that
- // case, following the
- // above program flow, it
- // is realized that when
- // distributing the row
- // and column no elements
- // of the matrix are
- // actually touched if
- // all the degrees of
- // freedom to which this
- // dof is constrained are
- // also constrained (the
- // usual case with
- // hanging nodes in
- // 3d). however, in the
- // line below, we do
- // actually do something
- // with this dof
- if (i == j)
- {
- column_indices[col_counter] = local_dof_indices[j];
- column_values[col_counter] = local_matrix(i,j);
- col_counter++;
- }
- }
- else
- Assert (false, ExcInternalError());
- }
-
- // Check whether we did remain within the
- // arrays when adding elements into the
- // scratch arrays. Moreover, there should
- // be at least one element in the scratch
- // array (the element diagonal).
- Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
-
- // Finally, write the scratch array into
- // the sparse matrix.
- if (col_counter > 0)
- global_matrix.add(local_dof_indices[i], col_counter,
- &column_indices[0], &column_values[0],
- false);
- }
- }
+ Vector<double> local_dummy(0), global_dummy (0);
+ distribute_local_to_global (local_matrix, local_dummy, local_dof_indices,
+ global_matrix, global_dummy);
}
MatrixType &global_matrix,
VectorType &global_vector) const
{
+ // check whether we work on real vectors
+ // or we just used a dummy when calling
+ // the other function above.
+ const bool use_vectors = (local_vector.size() == 0 &&
+ global_vector.size() == 0) ? false : true;
+
Assert (local_matrix.n() == local_dof_indices.size(),
ExcDimensionMismatch(local_matrix.n(), local_dof_indices.size()));
Assert (local_matrix.m() == local_dof_indices.size(),
ExcDimensionMismatch(local_matrix.m(), local_dof_indices.size()));
Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
- Assert (local_matrix.m() == local_vector.size(),
- ExcDimensionMismatch(local_matrix.m(), local_vector.size()));
- Assert (global_matrix.m() == global_vector.size(),
- ExcDimensionMismatch(global_matrix.m(), global_vector.size()));
+ if (use_vectors == true)
+ {
+ Assert (local_matrix.m() == local_vector.size(),
+ ExcDimensionMismatch(local_matrix.m(), local_vector.size()));
+ Assert (global_matrix.m() == global_vector.size(),
+ ExcDimensionMismatch(global_matrix.m(), global_vector.size()));
+ }
Assert (sorted == true, ExcMatrixNotClosed());
const unsigned int n_local_dofs = local_dof_indices.size();
if (lines.size() == 0)
{
global_matrix.add(local_dof_indices, local_matrix);
- for (unsigned int i=0; i<local_dof_indices.size(); ++i)
- global_vector(local_dof_indices[i]) += local_vector(i);
+ if (use_vectors == true)
+ for (unsigned int i=0; i<local_dof_indices.size(); ++i)
+ global_vector(local_dof_indices[i]) += local_vector(i);
}
else
{
// explicit elimination in the respective
// row of the inhomogeneous constraint in
// the matrix with Gauss elimination
- global_vector(local_dof_indices[i]) -= local_matrix(j,i) *
- position_j->inhomogeneity;
+ if (use_vectors == true)
+ global_vector(local_dof_indices[i]) -= local_matrix(j,i) *
+ position_j->inhomogeneity;
}
else if ((is_constrained_i == true) &&
(is_constrained_j == true))
if (i == j)
{
column_indices[col_counter] = local_dof_indices[j];
- if (std::fabs (local_matrix(i,j)) < 1e-8)
- column_values[col_counter] = 1;
- else
- column_values[col_counter] = local_matrix(i,j);
+ column_values[col_counter] = local_matrix(i,j);
col_counter++;
}
}
false);
// And we take care of the vector
- if (is_constrained_i == true)
- for (unsigned int q=0; q<position_i->entries.size(); ++q)
- global_vector(position_i->entries[q].first)
- += local_vector(i) * position_i->entries[q].second;
- else
- global_vector(local_dof_indices[i]) += local_vector(i);
- }
+ if (use_vectors == true)
+ {
+ if (is_constrained_i == true)
+ for (unsigned int q=0; q<position_i->entries.size(); ++q)
+ global_vector(position_i->entries[q].first)
+ += local_vector(i) * position_i->entries[q].second;
+ else
+ global_vector(local_dof_indices[i]) += local_vector(i);
+ }
+ }
}
}