lac: Add AffineConstraints class
author    Matthias Maier <tamiko@43-1.org>
          Thu, 24 May 2018 17:09:53 +0000 (12:09 -0500)
committer Matthias Maier <tamiko@43-1.org>
          Wed, 6 Jun 2018 15:19:38 +0000 (10:19 -0500)
For now this is a verbatim copy of the ConstraintMatrix class. The idea
is to templatify this class and then switch the code base over to it.

include/deal.II/lac/affine_constraints.h [new file with mode: 0644]
include/deal.II/lac/affine_constraints.templates.h [new file with mode: 0644]
source/lac/CMakeLists.txt
source/lac/affine_constraints.cc [new file with mode: 0644]
source/lac/affine_constraints.inst.in [new file with mode: 0644]

diff --git a/include/deal.II/lac/affine_constraints.h b/include/deal.II/lac/affine_constraints.h
new file mode 100644 (file)
index 0000000..7fa45a9
--- /dev/null
@@ -0,0 +1,1978 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1998 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_affine_constraints_h
+#define dealii_affine_constraints_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/exceptions.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/subscriptor.h>
+#include <deal.II/base/template_constraints.h>
+
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/vector_element_access.h>
+
+#include <boost/range/iterator_range.hpp>
+
+#include <vector>
+#include <set>
+#include <utility>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int dim, class T> class Table;
+template <typename> class FullMatrix;
+class SparsityPattern;
+class DynamicSparsityPattern;
+class BlockSparsityPattern;
+class BlockDynamicSparsityPattern;
+template <typename number> class SparseMatrix;
+template <typename number> class BlockSparseMatrix;
+
+namespace internals
+{
+  class GlobalRowsFromLocal;
+}
+
+
+// TODO[WB]: We should have a function of the kind
+//   AffineConstraints::add_constraint (const size_type constrained_dof,
+//     const std::vector<std::pair<size_type, double> > &entries,
+//     const double inhomogeneity = 0);
+// rather than building up constraints piecemeal through add_line/add_entry
+// etc. This would also eliminate the possibility of accidentally changing
+// existing constraints into something pointless, see the discussion on the
+// mailing list on "Tiny bug in interpolate_boundary_values" in Sept. 2010.
+
+/**
+ * This class implements dealing with linear (possibly inhomogeneous)
+ * constraints on degrees of freedom. The concept and origin of such
+ * constraints is extensively described in the
+ * @ref constraints
+ * module. The class is meant to deal with a limited number of constraints
+ * relative to the total number of degrees of freedom, for example a few per
+ * cent up to maybe 30 per cent; and with each constraint being a linear
+ * combination of <i>M</i> other degrees of freedom, where <i>M</i> is also
+ * relatively small (no larger than around the average number of entries per
+ * row of a linear system). It is <em>not</em> meant to describe full rank
+ * linear systems.
+ *
+ * The algorithms used in the implementation of this class are described in
+ * some detail in the
+ * @ref hp_paper "hp paper".
+ * There is also a significant amount of documentation on how to use this
+ * class in the
+ * @ref constraints
+ * module.
+ *
+ *
+ * <h3>Description of constraints</h3>
+ *
+ * Each "line" in objects of this class corresponds to one constrained degree
+ * of freedom, with the number of the line being <i>i</i>, entered by using
+ * add_line() or add_lines(). The entries in this line are pairs of the form
+ * (<i>j</i>,<i>a<sub>ij</sub></i>), which are added by add_entry() or
+ * add_entries(). The organization is essentially a SparsityPattern, but with
+ * only a few lines containing nonzero elements, and therefore no data is
+ * wasted on the others. For each line that has been added by the mechanism above,
+ * an elimination of the constrained degree of freedom of the form
+ * @f[
+ *  x_i = \sum_j a_{ij} x_j + b_i
+ * @f]
+ * is performed, where <i>b<sub>i</sub></i> is optional and set by
+ * set_inhomogeneity(). Thus, if a constraint is formulated for instance as a
+ * zero mean value of several degrees of freedom, one of the degrees has to be
+ * chosen to be eliminated.
+ *
+ * Note that the constraints are linear in the <i>x<sub>i</sub></i>, and that
+ * there might be a constant (non-homogeneous) term in the constraint. This is
+ * exactly the form we need for hanging node constraints, where we need to
+ * constrain one degree of freedom in terms of others. There are other
+ * conditions of this form possible, for example for implementing mean value
+ * conditions as is done in the step-11 tutorial program. The name of the
+ * class stems from the fact that each constraint expresses one degree of
+ * freedom as an <em>affine</em> function of other degrees of freedom,
+ * i.e., collectively the constraints can be represented in matrix form as
+ * <i>x</i> = <b>X</b> <i>x</i> + <i>b</i>, and this object then describes
+ * the matrix <b>X</b> and the vector <i>b</i>. The most frequent way to
+ * create/fill objects of this type is using the
+ * DoFTools::make_hanging_node_constraints() function. The use of these
+ * objects is first explained in step-6.
+ *
+ * Objects of the present type are organized in lines (rows), but only those
+ * lines are stored where constraints are present. New constraints are added
+ * by adding new lines using the add_line() function, and then populating a
+ * given line using the add_entry() function, or add_entries() to add
+ * more than one entry at a time. The right hand side element, if nonzero, can
+ * be set using the set_inhomogeneity() function. After all constraints have
+ * been added, you need to call close(), which compresses the storage format
+ * and sorts the entries.
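+ *
+ * As a minimal sketch, imposing the (hypothetical) constraint
+ * $x_3 = \frac 12 x_1 + \frac 12 x_2 + 1$ would look like this:
+ * @code
+ *   AffineConstraints constraints;
+ *   constraints.add_line (3);                // declare x_3 as constrained
+ *   constraints.add_entry (3, 1, 0.5);       // coefficient of x_1
+ *   constraints.add_entry (3, 2, 0.5);       // coefficient of x_2
+ *   constraints.set_inhomogeneity (3, 1.0);  // the constant offset
+ *   constraints.close ();                    // sort and compress the storage
+ * @endcode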
+ *
+ * @note Many of the algorithms this class implements are discussed in the
+ * @ref hp_paper.
+ * The algorithms are also related to those shown in <i>M. S. Shephard: Linear
+ * multipoint constraints applied via transformation as part of a direct
+ * stiffness assembly process. Int. J. Numer. Meth. Engrg., vol. 20 (1984),
+ * pp. 2107-2112.</i>, with the difference that the algorithms shown there
+ * completely eliminated constrained degrees of freedom, whereas we usually
+ * keep them as part of the linear system.
+ *
+ * @ingroup dofs
+ * @ingroup constraints
+ * @author Wolfgang Bangerth, Martin Kronbichler, 1998, 2004, 2008, 2009
+ */
+class AffineConstraints : public Subscriptor
+{
+public:
+  /**
+   * Declare the type for container size.
+   */
+  typedef types::global_dof_index size_type;
+
+  /**
+   * An enum that describes what should happen if the two AffineConstraints
+   * objects involved in a call to the merge() function happen to have
+   * constraints on the same degrees of freedom.
+   */
+  enum MergeConflictBehavior
+  {
+    /**
+     * Throw an exception if the two objects concerned have conflicting
+     * constraints on the same degree of freedom.
+     */
+    no_conflicts_allowed,
+
+    /**
+     * In an operation <code>cm1.merge(cm2)</code>, if <code>cm1</code> and
+     * <code>cm2</code> have constraints on the same degree of freedom, take
+     * the one from <code>cm1</code>.
+     */
+    left_object_wins,
+
+    /**
+     * In an operation <code>cm1.merge(cm2)</code>, if <code>cm1</code> and
+     * <code>cm2</code> have constraints on the same degree of freedom, take
+     * the one from <code>cm2</code>.
+     */
+    right_object_wins
+  };
+
+  /**
+   * Constructor. The supplied IndexSet defines which indices might be
+   * constrained inside this AffineConstraints container. In a calculation
+   * with a DoFHandler object based on parallel::distributed::Triangulation
+   * or parallel::shared::Triangulation, one should use the set of locally
+   * relevant dofs (see @ref GlossLocallyRelevantDof).
+   *
+   * The given IndexSet allows the AffineConstraints container to save
+   * memory by just not caring about degrees of freedom that are not of
+   * importance to the current processor. Alternatively, if no such
+   * IndexSet is provided, internal data structures for <i>all</i> possible
+   * indices will be created, leading to memory consumption on every
+   * processor that is proportional to the <i>overall</i> size of the
+   * problem, not just proportional to the size of the portion of the
+   * overall problem that is handled by the current processor.
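+   *
+   * A minimal sketch for the distributed case (assuming @p dof_handler is an
+   * already set up, distributed DoFHandler object):
+   * @code
+   *   IndexSet locally_relevant_dofs;
+   *   DoFTools::extract_locally_relevant_dofs (dof_handler,
+   *                                            locally_relevant_dofs);
+   *   AffineConstraints constraints (locally_relevant_dofs);
+   * @endcode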
+   */
+  explicit AffineConstraints (const IndexSet &local_constraints = IndexSet());
+
+  /**
+   * Copy constructor
+   */
+  explicit AffineConstraints (const AffineConstraints &affine_constraints);
+
+  /**
+   * Move constructor
+   */
+  AffineConstraints (AffineConstraints &&affine_constraints) = default;
+
+  /**
+   * Copy operator. Like for many other large objects, this operator
+   * is deleted to avoid its inadvertent use in places such as
+   * accidentally declaring an @p AffineConstraints object as a
+   * function argument by value, rather than by reference.
+   *
+   * However, you can use the copy_from() function to explicitly
+   * copy AffineConstraints objects.
+   */
+  AffineConstraints &operator= (const AffineConstraints &) = delete;
+
+  /**
+   * Move assignment operator
+   */
+  AffineConstraints &operator= (AffineConstraints &&affine_constraints) = default;
+
+  /**
+   * Copy the given object to the current one.
+   *
+   * This function exists because @p operator=() is explicitly
+   * disabled.
+   */
+  void copy_from (const AffineConstraints &other);
+
+  /**
+   * clear() the AffineConstraints object and supply an IndexSet with lines
+   * that may be constrained. This function is only relevant in the
+   * distributed case to supply a different IndexSet. Otherwise this routine
+   * is equivalent to calling clear(). See the constructor for details.
+   */
+  void reinit (const IndexSet &local_constraints = IndexSet());
+
+  /**
+   * Determine whether we can store a constraint for the given @p line_index.
+   * This routine only matters in the distributed case and checks if the
+   * IndexSet allows storage of this line. It always returns true if not in
+   * the distributed case.
+   */
+  bool can_store_line (const size_type line_index) const;
+
+  /**
+   * Return the index set describing locally relevant lines if any are
+   * present. Note that if no local lines were given, this represents an empty
+   * IndexSet, whereas otherwise it contains the global problem size and the
+   * local range.
+   */
+  const IndexSet &get_local_lines() const;
+
+  /**
+   * This function copies the content of @p constraints_in with DoFs that are
+   * elements of the IndexSet @p filter. Elements that are not present in the
+   * IndexSet are ignored. All DoFs will be transformed to local index space
+   * of the filter, both the constrained DoFs and the other DoFs these entries
+   * are constrained to. The local index space of the filter is a contiguous
+   * numbering of all (global) DoFs that are elements in the filter.
+   *
+   * If, for example, the filter represents the range <tt>[10,20)</tt>, and
+   * the constraint matrix @p constraints_in includes the global indices
+   * <tt>{7,13,14}</tt>, the indices <tt>{3,4}</tt> are added to the calling
+   * constraint matrix (since 13 and 14 are elements of the filter, and in the
+   * filter's zero-based local numbering 13 becomes index 3 and 14 becomes index 4).
+   *
+   * This function provides an easy way to create an AffineConstraints object
+   * for certain vector components in a vector-valued problem from a full
+   * AffineConstraints object, i.e., it extracts a diagonal subblock from a
+   * larger AffineConstraints object. The block is specified by the IndexSet
+   * argument.
+   */
+  void add_selected_constraints (const AffineConstraints &constraints_in,
+                                 const IndexSet          &filter);
+
+  /**
+   * @name Adding constraints
+   * @{
+   */
+
+  /**
+   * Add a new line to the matrix. If the line already exists, then the
+   * function simply returns without doing anything.
+   */
+  void add_line (const size_type line);
+
+  /**
+   * Call the first add_line() function for every index <code>i</code> for
+   * which <code>lines[i]</code> is true.
+   *
+   * This function essentially exists to allow adding several constraints of
+   * the form <i>x<sub>i</sub></i>=0 all at once, where the set of indices
+   * <i>i</i> for which these constraints should be added are given by the
+   * argument of this function. On the other hand, just as if the
+   * single-argument add_line() function were called repeatedly, the constraints can
+   * later be modified to include linear dependencies using the add_entry()
+   * function as well as inhomogeneities using set_inhomogeneity().
+   */
+  void add_lines (const std::vector<bool> &lines);
+
+  /**
+   * Call the first add_line() function for every index <code>i</code> that
+   * appears in the argument.
+   *
+   * This function essentially exists to allow adding several constraints of
+   * the form <i>x<sub>i</sub></i>=0 all at once, where the set of indices
+   * <i>i</i> for which these constraints should be added are given by the
+   * argument of this function. On the other hand, just as if the
+   * single-argument add_line() function were called repeatedly, the constraints can
+   * later be modified to include linear dependencies using the add_entry()
+   * function as well as inhomogeneities using set_inhomogeneity().
+   */
+  void add_lines (const std::set<size_type> &lines);
+
+  /**
+   * Call the first add_line() function for every index <code>i</code> that
+   * appears in the argument.
+   *
+   * This function essentially exists to allow adding several constraints of
+   * the form <i>x<sub>i</sub></i>=0 all at once, where the set of indices
+   * <i>i</i> for which these constraints should be added are given by the
+   * argument of this function. On the other hand, just as if the
+   * single-argument add_line() function were called repeatedly, the constraints can
+   * later be modified to include linear dependencies using the add_entry()
+   * function as well as inhomogeneities using set_inhomogeneity().
+   */
+  void add_lines (const IndexSet &lines);
+
+  /**
+   * Add an entry to a given line. The list of lines is searched from the back
+   * to the front, so clever programming would add a new line (which is pushed
+   * to the back) and immediately afterwards fill the entries of that line.
+   * This way, no expensive searching is needed.
+   *
+   * If an entry with the same indices as the one this function call denotes
+   * already exists, then this function simply returns provided that the value
+   * of the entry is the same. Thus, it does no harm to enter a constraint
+   * twice.
+   */
+  void add_entry (const size_type line,
+                  const size_type column,
+                  const double value);
+
+  /**
+   * Add a whole series of entries, denoted by pairs of column indices and
+   * values, to a line of constraints. This function is equivalent to calling
+   * the preceding function several times, but is faster.
+   */
+  void add_entries (const size_type                                  line,
+                    const std::vector<std::pair<size_type,double> > &col_val_pairs);
+
+  /**
+   * Set an inhomogeneity to the constraint line <i>i</i>, according to the
+   * discussion in the general class description.
+   *
+   * @note The line needs to be added with one of the add_line() calls first.
+   */
+  void set_inhomogeneity (const size_type line,
+                          const double    value);
+
+  /**
+   * Close the filling of entries. Since the lines of a matrix of this type
+   * are usually filled in an arbitrary order and since we do not want to use
+   * associative containers to store the lines, we need to sort the lines
+   * and within the lines the columns before usage of the matrix. This is done
+   * through this function.
+   *
+   * Also, zero entries are discarded, since they are not needed.
+   *
+   * After closing, no more entries are accepted. If the object was already
+   * closed, then this function returns immediately.
+   *
+   * This function also resolves chains of constraints. For example, degree of
+   * freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$
+   * while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
+   * + \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
+   * \frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that
+   * cycles in this graph of constraints are not allowed, i.e. for example
+   * $u_4$ may not be constrained, directly or indirectly, to $u_{13}$ again.
+   */
+  void close ();
+
+  /**
+   * Merge the constraints represented by the object given as argument into
+   * the constraints represented by this object. Both objects may or may not
+   * be closed (by having their function close() called before). If this
+   * object was closed before, then it will be closed afterwards as well.
+   * Note, however, that if the other argument is closed, then merging may be
+   * significantly faster.
+   *
+   * Using the default value of the second argument, the constraints in each
+   * of the two objects (the old one represented by this object and the
+   * argument) may not refer to the same degree of freedom, i.e. a degree of
+   * freedom that is constrained in one object may not be constrained in the
+   * second. If this is nevertheless the case, an exception is thrown.
+   * However, this behavior can be changed by providing a different value for
+   * the second argument.
+   *
+   * By default, merging two AffineConstraints objects that are initialized
+   * with different IndexSet objects is not allowed.
+   * This behavior can be altered by setting @p allow_different_local_lines
+   * appropriately.
+   *
+   * Merging an AffineConstraints object that is initialized with an IndexSet
+   * and one that is not initialized with an IndexSet is not yet implemented.
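+   *
+   * As a small sketch, with @p cm1 and @p cm2 as illustrative names for two
+   * such containers, resolving conflicts in favor of the argument reads:
+   * @code
+   *   cm1.merge (cm2, AffineConstraints::right_object_wins);
+   * @endcode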
+   */
+  void merge (const AffineConstraints &other_constraints,
+              const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed,
+              const bool allow_different_local_lines = false);
+
+  /**
+   * Shift all entries of this matrix down @p offset rows and over @p offset
+   * columns. If this object is initialized with an IndexSet, local_lines are
+   * shifted as well.
+   *
+   * This function is useful if you are building block matrices, where all
+   * blocks are built by the same DoFHandler object, i.e. the matrix size is
+   * larger than the number of degrees of freedom. Since several matrix rows
+   * and columns correspond to the same degrees of freedom, you'd generate
+   * several constraint objects, then shift them, and finally merge() them
+   * together again.
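+   *
+   * A minimal sketch of that workflow, with @p n_dofs as an illustrative
+   * name for the number of degrees of freedom per block:
+   * @code
+   *   AffineConstraints second_block;
+   *   second_block.copy_from (constraints);  // same constraints...
+   *   second_block.shift (n_dofs);           // ...moved to the second block
+   *   constraints.merge (second_block);      // combine both blocks
+   * @endcode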
+   */
+  void shift (const size_type offset);
+
+  /**
+   * Clear all entries of this matrix. Reset the flag determining whether new
+   * entries are accepted or not.
+   *
+   * This function may also be called on objects that are empty or already
+   * cleared.
+   */
+  void clear ();
+
+  /**
+   * @}
+   */
+
+
+  /**
+   * @name Querying constraints
+   * @{
+   */
+
+  /**
+   * Return the number of constraints stored in this matrix.
+   */
+  size_type n_constraints () const;
+
+  /**
+   * Return whether the degree of freedom with number @p index is a
+   * constrained one.
+   *
+   * Note that if close() was called before, then this function is
+   * significantly faster, since then the constrained degrees of freedom are
+   * sorted and we can do a binary search, while before close() was called, we
+   * have to perform a linear search through all entries.
+   */
+  bool is_constrained (const size_type index) const;
+
+  /**
+   * Return whether the dof is constrained, and whether it is constrained to
+   * only one other degree of freedom with weight one. The function therefore
+   * returns whether the degree of freedom would simply be eliminated in favor
+   * of exactly one other degree of freedom.
+   *
+   * The function returns @p false if either the degree of freedom is not
+   * constrained at all, or if it is constrained to more than one other degree
+   * of freedom, or if it is constrained to only one degree of freedom but
+   * with a weight different from one.
+   */
+  bool is_identity_constrained (const size_type index) const;
+
+  /**
+   * Return whether the two given degrees of freedom are linked by an equality
+   * constraint that either constrains index1 so that
+   * <code>index1=index2</code> or constrains index2 so that
+   * <code>index2=index1</code>.
+   */
+  bool are_identity_constrained (const size_type index1,
+                                 const size_type index2) const;
+
+  /**
+   * Return the maximum number of other dofs that one dof is constrained to.
+   * For example, in 2d a hanging node is constrained only to its two
+   * neighbors, so the returned value would be 2. However, for higher order
+   * elements and/or higher dimensions, or other types of constraints, this
+   * number is no longer obvious.
+   *
+   * The name indicates that within the system matrix, references to a
+   * constrained node are indirected to the nodes it is constrained to.
+   */
+  size_type max_constraint_indirections () const;
+
+  /**
+   * Return <tt>true</tt> in case the dof is constrained and a nontrivial
+   * inhomogeneous value is set for the dof.
+   */
+  bool is_inhomogeneously_constrained (const size_type index) const;
+
+  /**
+   * Return <tt>false</tt> if all constraints in the AffineConstraints are
+   * homogeneous ones, and <tt>true</tt> if there is at least one
+   * inhomogeneity.
+   */
+  bool has_inhomogeneities () const;
+
+  /**
+   * Return a pointer to the vector of entries if a line is constrained,
+   * and a null pointer in case the dof is not constrained.
+   */
+  const std::vector<std::pair<size_type,double> > *
+  get_constraint_entries (const size_type line) const;
+
+  /**
+   * Return the value of the inhomogeneity stored in the constrained dof @p
+   * line. For unconstrained dofs, this function returns zero.
+   */
+  double get_inhomogeneity (const size_type line) const;
+
+  /**
+   * Print the constraints represented by the current object to the
+   * given stream.
+   *
+   * For each constraint of the form
+   * @f[
+   *  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
+   * @f]
+   * this function will write a sequence of lines that look like this:
+   * @code
+   *   42 2 : 0.5
+   *   42 14 : 0.25
+   *   42 : 2.75
+   * @endcode
+   * The last line is only shown if the inhomogeneity (here: 2.75) is
+   * nonzero.
+   *
+   * A block of lines such as the one above is repeated for each
+   * constrained degree of freedom.
+   */
+  void print (std::ostream &out) const;
+
+  /**
+   * Write the graph of constraints in 'dot' format. 'dot' is a program that
+   * can take a list of nodes and produce a graphical representation of the
+   * graph of constrained degrees of freedom and the degrees of freedom they
+   * are constrained to.
+   *
+   * The output of this function can be used as input to the 'dot' program
+   * that can convert the graph into a graphical representation in postscript,
+   * png, xfig, and a number of other formats.
+   *
+   * This function exists mostly for debugging purposes.
+   */
+  void write_dot (std::ostream &) const;
+
+  /**
+   * Determine an estimate for the memory consumption (in bytes) of this
+   * object.
+   */
+  std::size_t memory_consumption () const;
+
+  /**
+   * Add the constraint indices associated with the indices in the given vector.
+   * After a call to this function, the indices vector contains the initial
+   * elements and all the associated constrained indices. This function sorts
+   * the elements and suppresses duplicates.
+   */
+  void resolve_indices(std::vector<types::global_dof_index> &indices) const;
+
+  /**
+   * @}
+   */
+
+  /**
+   * @name Eliminating constraints from linear systems after their creation
+   * @{
+   */
+
+
+  /**
+   * Condense a sparsity pattern. The name of the function mimics the name of
+   * the function we use to condense linear systems, but it is a bit of a
+   * misnomer for the current context. This is because in the context of
+   * linear systems, we eliminate certain rows and columns of the linear
+   * system, i.e., we "reduce" or "condense" the linear system. On the other
+   * hand, in the current context, the functions does not remove nonzero
+   * entries from the sparsity pattern. Rather, it adds those nonzero entry
+   * locations to the sparsity pattern that will later be needed for the
+   * process of condensation of constrained degrees of freedom from a linear
+   * system.
+   *
+   * Since this function adds new nonzero entries to the sparsity pattern, the
+   * given sparsity pattern must not be compressed. The constraint matrix
+   * (i.e., the current object) must be closed. The sparsity pattern is
+   * compressed at the end of the function.
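+   *
+   * A minimal sketch of the intended call sequence (assuming @p dof_handler
+   * and @p constraints have already been set up):
+   * @code
+   *   SparsityPattern sparsity (dof_handler.n_dofs(),
+   *                             dof_handler.n_dofs(),
+   *                             dof_handler.max_couplings_between_dofs());
+   *   DoFTools::make_sparsity_pattern (dof_handler, sparsity);
+   *   constraints.condense (sparsity);  // adds coupling entries, compresses
+   * @endcode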
+   */
+  void condense (SparsityPattern &sparsity) const;
+
+  /**
+   * Same function as above, but condenses square block sparsity patterns.
+   */
+  void condense (BlockSparsityPattern &sparsity) const;
+
+  /**
+   * Same function as above, but condenses square compressed sparsity
+   * patterns.
+   */
+  void condense (DynamicSparsityPattern &sparsity) const;
+
+  /**
+   * Same function as above, but condenses square compressed sparsity
+   * patterns.
+   */
+  void condense (BlockDynamicSparsityPattern &sparsity) const;
+
+  /**
+   * Condense a given matrix, i.e., eliminate the rows and columns of the
+   * matrix that correspond to constrained degrees of freedom.
+   *
+   * See the general documentation of this class for more detailed
+   * information.
+   */
+  template <typename number>
+  void condense (SparseMatrix<number> &matrix) const;
+
+  /**
+   * Same function as above, but condenses square block sparse matrices.
+   */
+  template <typename number>
+  void condense (BlockSparseMatrix<number> &matrix) const;
+
+  /**
+   * Condense the given vector in-place. The @p VectorType may be a
+   * Vector<float>, Vector<double>, BlockVector<tt><...></tt>, a PETSc or
+   * Trilinos vector wrapper class, or any other type having the same
+   * interface. Note that this function does not take any inhomogeneity into
+   * account and throws an exception in case there are any inhomogeneities.
+   * Use the function that takes both a matrix and a vector for that case.
+   *
+   * @note This function does not work for MPI vectors. Use condense() with
+   * two vector arguments instead.
+   */
+  template <class VectorType>
+  void condense (VectorType &vec) const;
+
+  /**
+   * The function copies and condenses values from @p vec_ghosted into @p
+   * output. In a serial code it is equivalent to calling condense (vec). If
+   * called in parallel, @p vec_ghosted is supposed to contain ghost elements
+   * while @p output should not.
+   */
+  template <class VectorType>
+  void condense (const VectorType &vec_ghosted,
+                 VectorType       &output) const;
+
+  /**
+   * Condense a given matrix and a given vector by eliminating rows and
+   * columns of the linear system that correspond to constrained degrees of
+   * freedom. The sparsity pattern associated with the matrix needs to be
+   * condensed and compressed.  This function is the appropriate choice for
+   * applying inhomogeneous constraints.
+   *
+   * The constraint matrix object must be closed to call this function.
+   *
+   * See the general documentation of this class for more detailed
+   * information.
+   */
+  template <typename number, class VectorType>
+  void condense (SparseMatrix<number> &matrix,
+                 VectorType           &vector) const;
+
+  /**
+   * Same function as above, but condenses square block sparse matrices and
+   * vectors.
+   */
+  template <typename number, class BlockVectorType>
+  void condense (BlockSparseMatrix<number> &matrix,
+                 BlockVectorType           &vector) const;
+
+  /**
+   * Set the values of all constrained DoFs in a vector to zero.  The @p
+   * VectorType may be a Vector<float>, Vector<double>,
+   * BlockVector<tt><...></tt>, a PETSc or Trilinos vector wrapper class, or
+   * any other type having the same interface.
+   */
+  template <class VectorType>
+  void set_zero (VectorType &vec) const;
+
+  /**
+   * @}
+   */
+
+  /**
+   * @name Eliminating constraints from linear systems during their creation
+   * @{
+   */
+
+  /**
+   * This function takes a vector of local contributions (@p local_vector)
+   * corresponding to the degrees of freedom indices given in @p
+   * local_dof_indices and distributes them to the global vector. In most
+   * cases, these local contributions will be the result of an integration
+   * over a cell or face of a cell. However, as long as @p local_vector and @p
+   * local_dof_indices have the same number of elements, this function is
+   * happy with whatever it is given.
+   *
+   * In contrast to the similar function in the DoFAccessor class, this
+   * function also takes care of constraints, i.e. if one of the elements of
+   * @p local_dof_indices belongs to a constrained node, then rather than
+   * writing the corresponding element of @p local_vector into @p
+   * global_vector, the element is distributed to the entries in the global
+   * vector to which this particular degree of freedom is constrained.
+   *
+   * Thus, by using this function to distribute local contributions to the
+   * global object, one saves the call to the condense function after the
+   * vectors and matrices are fully assembled. On the other hand, by
+   * consequence, the function does not only write into the entries enumerated
+   * by the @p local_dof_indices array, but also (possibly) others as
+   * necessary.
+   *
+   * Note that this function will apply all constraints as if they were
+   * homogeneous. For correctly setting inhomogeneous constraints, use the
+   * similar function with a matrix argument or the function with both matrix
+   * and vector arguments.
+   *
+   * @note This function in itself is thread-safe, i.e., it works properly
+   * also when several threads call it simultaneously. However, the function
+   * call is only thread-safe if the underlying global vector allows for
+   * simultaneous access and the access is not to rows with the same global
+   * index at the same time. This needs to be ensured on the caller's
+   * side. There is no locking mechanism inside this method to prevent data
+   * races.
+   *
+   * @param[in] local_vector Vector of local contributions.
+   * @param[in] local_dof_indices Local degrees of freedom indices
+   * corresponding to the vector of local contributions.
+   * @param[out]  global_vector The global vector to which all local
+   * contributions will be added.
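+   *
+   * A sketch of the typical use during assembly of a right hand side (all
+   * names are illustrative):
+   * @code
+   *   Vector<double> cell_rhs (dofs_per_cell);
+   *   std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+   *   // ...integrate cell_rhs and fill local_dof_indices on each cell...
+   *   constraints.distribute_local_to_global (cell_rhs,
+   *                                           local_dof_indices,
+   *                                           system_rhs);
+   * @endcode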
+   */
+  template <class InVector, class OutVector>
+  void
+  distribute_local_to_global (const InVector               &local_vector,
+                              const std::vector<size_type> &local_dof_indices,
+                              OutVector                    &global_vector) const;
+
+  /**
+   * This function takes a vector of local contributions (@p local_vector)
+   * corresponding to the degrees of freedom indices given in @p
+   * local_dof_indices and distributes them to the global vector. In most
+   * cases, these local contributions will be the result of an integration
+   * over a cell or face of a cell. However, as long as @p local_vector and @p
+   * local_dof_indices have the same number of elements, this function is
+   * happy with whatever it is given.
+   *
+   * In contrast to the similar function in the DoFAccessor class, this
+   * function also takes care of constraints, i.e. if one of the elements of
+   * @p local_dof_indices belongs to a constrained node, then rather than
+   * writing the corresponding element of @p local_vector into @p
+   * global_vector, the element is distributed to the entries in the global
+   * vector to which this particular degree of freedom is constrained.
+   *
+   * Thus, by using this function to distribute local contributions to the
+   * global object, one saves the call to the condense function after the
+   * vectors and matrices are fully assembled. On the other hand, by
+   * consequence, the function does not only write into the entries enumerated
+   * by the @p local_dof_indices array, but also (possibly) others as
+   * necessary. This includes writing into diagonal elements of the matrix if
+   * the corresponding degree of freedom is constrained.
+   *
+   * The fourth argument <tt>local_matrix</tt> is intended to be used in case
+   * one wants to apply inhomogeneous constraints on the vector only. Such a
+   * situation could be one where one wants to assemble a right hand side
+   * vector for a problem with inhomogeneous constraints, but the global matrix
+   * has been assembled previously. A typical example of this is a time
+   * stepping algorithm where the stiffness matrix is assembled once, and the
+   * right hand side updated every time step. Note, however, that the entries
+   * in the columns of the local matrix have to be exactly the same as those
+   * that have been written into the global matrix. Otherwise, this function
+   * will not be able to correctly handle inhomogeneities.
+   *
+   * @note This function in itself is thread-safe, i.e., it works properly
+   * also when several threads call it simultaneously. However, the function
+   * call is only thread-safe if the underlying global vector allows for
+   * simultaneous access and the access is not to rows with the same global
+   * index at the same time. This needs to be ensured on the caller's
+   * side. There is no locking mechanism inside this method to prevent data
+   * races.
+   */
+  template <typename VectorType, typename LocalType>
+  void
+  distribute_local_to_global (const Vector<LocalType>      &local_vector,
+                              const std::vector<size_type> &local_dof_indices,
+                              VectorType                   &global_vector,
+                              const FullMatrix<LocalType>  &local_matrix) const;
+
+  /**
+   * Same as the previous function, except that it uses two (possibly) different
+   * index sets to correctly handle inhomogeneities when the local matrix is
+   * computed from a combination of two neighboring elements, for example for an
+   * edge integral term in DG. Note that in the case that these two elements have
+   * different polynomial degrees, the local matrix is rectangular.
+   *
+   * <tt>local_dof_indices_row</tt> is the set of row indices and
+   * <tt>local_dof_indices_col</tt> is the set of column indices of the local matrix.
+   * <tt>diagonal</tt> indicates whether the two index sets are equal.
+   *
+   * If both index sets are equal, <tt>diagonal</tt> must be set to true or we
+   * simply use the previous function. If both index sets are different
+   * (<tt>diagonal=false</tt>), the <tt>global_vector</tt> is modified to handle
+   * inhomogeneities, but no entries from <tt>local_vector</tt> are added. Note
+   * that the edge integrals for inner edges in DG do not contribute any values
+   * to the right hand side.
+   */
+  template <typename VectorType, typename LocalType>
+  void
+  distribute_local_to_global (const Vector<LocalType>      &local_vector,
+                              const std::vector<size_type> &local_dof_indices_row,
+                              const std::vector<size_type> &local_dof_indices_col,
+                              VectorType                   &global_vector,
+                              const FullMatrix<LocalType>  &local_matrix,
+                              bool diagonal = false) const;
+
+  /**
+   * Enter a single value into a result vector, obeying constraints.
+   */
+  template <class VectorType>
+  void
+  distribute_local_to_global (const size_type index,
+                              const double    value,
+                              VectorType     &global_vector) const;
+
+  /**
+   * This function takes a range of local contributions, given by the
+   * iterators @p local_vector_begin and @p local_vector_end, corresponding to
+   * the degrees of freedom indices starting at @p local_indices_begin, and
+   * distributes them to the global vector. In most cases, these local
+   * contributions will be the result of an integration over a cell or face of
+   * a cell. However, as long as the given indices indicate reasonable global
+   * vector entries, this function is happy with whatever it is given.
+   *
+   * If one of the indexed degrees of freedom belongs to a constrained
+   * node, then rather than writing the corresponding local element
+   * into @p global_vector, the element is distributed to the
+   * entries in the global vector to which this particular degree of freedom
+   * is constrained.
+   *
+   * Thus, by using this function to distribute local contributions to the
+   * global object, one saves the call to the condense function after the
+   * vectors and matrices are fully assembled. Note that this function
+   * completely ignores inhomogeneous constraints.
+   *
+   * @note This function in itself is thread-safe, i.e., it works properly
+   * also when several threads call it simultaneously. However, the function
+   * call is only thread-safe if the underlying global vector allows for
+   * simultaneous access and the access is not to rows with the same global
+   * index at the same time. This needs to be ensured on the caller's
+   * side. There is no locking mechanism inside this method to prevent data
+   * races.
+   */
+  template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+            class VectorType>
+  void
+  distribute_local_to_global (ForwardIteratorVec local_vector_begin,
+                              ForwardIteratorVec local_vector_end,
+                              ForwardIteratorInd local_indices_begin,
+                              VectorType        &global_vector) const;
+
+  /**
+   * This function takes a matrix of local contributions (@p local_matrix)
+   * corresponding to the degrees of freedom indices given in @p
+   * local_dof_indices and distributes them to the global matrix. In most
+   * cases, these local contributions will be the result of an integration
+   * over a cell or face of a cell. However, as long as @p local_matrix and @p
+   * local_dof_indices have the same number of elements, this function is
+   * happy with whatever it is given.
+   *
+   * In contrast to the similar function in the DoFAccessor class, this
+   * function also takes care of constraints, i.e. if one of the elements of
+   * @p local_dof_indices belongs to a constrained node, then rather than
+   * writing the corresponding element of @p local_matrix into @p
+   * global_matrix, the element is distributed to the entries in the global
+   * matrix to which this particular degree of freedom is constrained.
+   *
+   * With this scheme, we never write into rows or columns of constrained
+   * degrees of freedom. In order to make sure that the resulting matrix can
+   * still be inverted, we need to do something with the diagonal elements
+   * corresponding to constrained nodes. Thus, if a degree of freedom in @p
+   * local_dof_indices is constrained, we distribute the corresponding entries
+   * in the matrix, but also add the absolute value of the diagonal entry of
+   * the local matrix to the corresponding entry in the global matrix.
+   * Assuming the discretized operator is positive definite, this guarantees
+   * that the diagonal entry is always non-zero, positive, and of the same
+   * order of magnitude as the other entries of the matrix. On the other hand,
+   * when solving a source problem $Au=f$ the exact value of the diagonal
+   * element is not important, since the value of the respective degree of
+   * freedom will be overwritten by the distribute() call later on anyway.
+   *
+   * @note The procedure described above adds an unforeseeable number of
+   * artificial eigenvalues to the spectrum of the matrix. Therefore, when
+   * the spectrum matters, for example when solving eigenvalue problems, it
+   * is recommended to use the equivalent function with two local index
+   * vectors instead.
+   *
+   * By using this function to distribute local contributions to the global
+   * object, one saves the call to the condense function after the vectors and
+   * matrices are fully assembled.
+   *
+   * @note This function in itself is thread-safe, i.e., it works properly
+   * also when several threads call it simultaneously. However, the function
+   * call is only thread-safe if the underlying global matrix allows for
+   * simultaneous access and the access is not to rows with the same global
+   * index at the same time. This needs to be ensured on the caller's
+   * side. There is no locking mechanism inside this method to prevent data
+   * races.
+   */
+  template <typename MatrixType>
+  void
+  distribute_local_to_global (const FullMatrix<typename MatrixType::value_type> &local_matrix,
+                              const std::vector<size_type> &local_dof_indices,
+                              MatrixType                   &global_matrix) const;
+
+  /**
+   * Does almost the same as the function above but can treat general
+   * rectangular matrices.  The main difference to achieve this is that the
+   * diagonal entries in constrained rows are left untouched instead of being
+   * filled with arbitrary values.
+   *
+   * Since the diagonal entries corresponding to eliminated degrees of freedom
+   * are not set, the result may have a zero eigenvalue, if applied to a
+   * square matrix. This has to be considered when solving the resulting
+   * problems. For solving a source problem $Au=f$, it is possible to set the
+   * diagonal entry after building the matrix by a piece of code of the form
+   *
+   * @code
+   *   for (unsigned int i = 0; i < matrix.m(); ++i)
+   *     if (constraints.is_constrained(i))
+   *       matrix.diag_element(i) = 1.;
+   * @endcode
+   *
+   * The value of one used here is arbitrary, but uncritical in the context
+   * of Krylov space methods, since it corresponds to an invariant subspace.
+   * If the other matrix entries are smaller or larger by a factor close to
+   * machine accuracy, it may be advisable to adjust it.
+   *
+   * For solving eigenvalue problems, this will only add one spurious zero
+   * eigenvalue (with a multiplicity that is possibly greater than one).
+   * Taking this into account, nothing else has to be changed.
+   */
+  template <typename MatrixType>
+  void
+  distribute_local_to_global (const FullMatrix<typename MatrixType::value_type> &local_matrix,
+                              const std::vector<size_type> &row_indices,
+                              const std::vector<size_type> &col_indices,
+                              MatrixType                   &global_matrix) const;
+
+  /**
+   * Does almost the same as the function above for general rectangular
+   * matrices but uses different AffineConstraints objects on the row and
+   * column indices. The convention is that row indices are constrained
+   * according to the calling AffineConstraints <code>*this</code>, whereas
+   * column indices are constrained according to the given AffineConstraints
+   * <code>column_affine_constraints</code>. This function makes it possible to handle the
+   * case where rows and columns of a matrix are represented by different
+   * function spaces with their own enumeration of indices, as e.g. in mixed
+   * finite element problems with separate DoFHandler objects or for flux
+   * matrices between different levels in multigrid methods.
+   *
+   * Like the other method with separate slots for row and column indices,
+   * this method does not add diagonal entries to eliminated degrees of
+   * freedom. See there for a more elaborate description.
+   */
+  template <typename MatrixType>
+  void distribute_local_to_global(
+    const FullMatrix<typename MatrixType::value_type> &local_matrix,
+    const std::vector<size_type> &row_indices,
+    const AffineConstraints &column_affine_constraints,
+    const std::vector<size_type> &column_indices,
+    MatrixType &global_matrix) const;
+
+  /**
+   * This function simultaneously writes elements into matrix and vector,
+   * according to the constraints specified by the calling AffineConstraints.
+   * This function can correctly handle inhomogeneous constraints as well. For
+   * the parameter use_inhomogeneities_for_rhs see the documentation in the
+   * @ref constraints
+   * module.
+   *
+   * @note This function in itself is thread-safe, i.e., it works properly
+   * also when several threads call it simultaneously. However, the function
+   * call is only thread-safe if the underlying global matrix and vector allow
+   * for simultaneous access and the access is not to rows with the same
+   * global index at the same time. This needs to be ensured on the
+   * caller's side. There is no locking mechanism inside this method to
+   * prevent data races.
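+   *
+   * A sketch of the typical use during assembly (all names are
+   * illustrative); this call replaces the condense() step after assembly:
+   * @code
+   *   constraints.distribute_local_to_global (cell_matrix,
+   *                                           cell_rhs,
+   *                                           local_dof_indices,
+   *                                           system_matrix,
+   *                                           system_rhs);
+   * @endcode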
+   */
+  template <typename MatrixType, typename VectorType>
+  void
+  distribute_local_to_global (const FullMatrix<typename MatrixType::value_type> &local_matrix,
+                              const Vector<typename VectorType::value_type>     &local_vector,
+                              const std::vector<size_type>  &local_dof_indices,
+                              MatrixType                    &global_matrix,
+                              VectorType                    &global_vector,
+                              bool                          use_inhomogeneities_for_rhs = false) const;
+
+  /**
+   * Do an operation similar to the distribute_local_to_global() function that
+   * distributes writing entries into a matrix for constrained degrees of
+   * freedom, except that here we don't write into a matrix but only allocate
+   * sparsity pattern entries.
+   *
+   * As explained in the
+   * @ref hp_paper "hp paper"
+   * and in step-27, first allocating a sparsity pattern and later coming back
+   * and allocating additional entries for those matrix entries that will be
+   * written to due to the elimination of constrained degrees of freedom
+   * (using AffineConstraints::condense()) can be a very expensive procedure.
+   * It is cheaper to allocate these entries right away without having to do a
+   * second pass over the sparsity pattern object. This function does exactly
+   * that.
+   *
+   * Because the function only allocates entries in a sparsity pattern, all it
+   * needs to know are the degrees of freedom that couple to each other.
+   * Unlike the previous function, no actual values are written, so the second
+   * input argument is not necessary here.
+   *
+   * The third argument to this function, @p keep_constrained_entries, determines
+   * whether the function shall allocate entries in the sparsity pattern at
+   * all for entries that will later be set to zero upon condensation of the
+   * matrix. These entries are necessary if the matrix is built unconstrained,
+   * and only later condensed. They are not necessary if the matrix is built
+   * using the distribute_local_to_global() function of this class which
+   * distributes entries right away when copying a local matrix into a global
+   * object. The default of this argument is true, meaning to allocate the few
+   * entries that may later be set to zero.
+   *
+   * By default, the function adds entries for all pairs of indices given in
+   * the first argument to the sparsity pattern (unless
+   * keep_constrained_entries is false). However, sometimes one would like to
+   * only add a subset of all of these pairs. In that case, the last argument
+   * can be used; it specifies a boolean mask that selects which of the pairs
+   * of indices should be considered. If the mask is false for a pair of indices, then no
+   * entry will be added to the sparsity pattern for this pair, irrespective
+   * of whether one or both of the indices correspond to constrained degrees
+   * of freedom.
+   *
+   * This function is not typically called from user code, but is used in the
+   * DoFTools::make_sparsity_pattern() function when passed a constraint
+   * matrix object.
+   *
+   * @note This function in itself is thread-safe, i.e., it works properly
+   * also when several threads call it simultaneously. However, the function
+   * call is only thread-safe if the underlying global sparsity pattern allows
+   * for simultaneous access and the access is not to rows with the same
+   * global index at the same time. This needs to be ensured on the
+   * caller's side. There is no locking mechanism inside this method to
+   * prevent data races.
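+   *
+   * A minimal sketch of how this function is typically driven from a loop
+   * over cells (illustrative names; this is essentially what
+   * DoFTools::make_sparsity_pattern() does):
+   * @code
+   *   DynamicSparsityPattern dsp (dof_handler.n_dofs());
+   *   std::vector<types::global_dof_index> local_dof_indices (fe.dofs_per_cell);
+   *   for (const auto &cell : dof_handler.active_cell_iterators())
+   *     {
+   *       cell->get_dof_indices (local_dof_indices);
+   *       constraints.add_entries_local_to_global (local_dof_indices, dsp);
+   *     }
+   * @endcode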
+   */
+  template <typename SparsityPatternType>
+  void
+  add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+                               SparsityPatternType          &sparsity_pattern,
+                               const bool                    keep_constrained_entries = true,
+                               const Table<2,bool>          &dof_mask                 = default_empty_table) const;
+
+  /**
+   * Similar to the other function, but for non-quadratic sparsity patterns.
+   */
+  template <typename SparsityPatternType>
+  void
+  add_entries_local_to_global (const std::vector<size_type> &row_indices,
+                               const std::vector<size_type> &col_indices,
+                               SparsityPatternType          &sparsity_pattern,
+                               const bool                    keep_constrained_entries = true,
+                               const Table<2,bool>          &dof_mask                 = default_empty_table) const;
+
+  /**
+   * This function imports values from a global vector (@p global_vector) by
+   * applying the constraints to a vector of local values, expressed in
+   * iterator format.  In most cases, the local values will be identified by
+   * the local dof values on a cell. However, as long as the indices passed
+   * via @p local_indices_begin indicate reasonable global vector entries, this
+   * function is happy with whatever it is given.
+   *
+   * If one of the indexed degrees of freedom belongs to a constrained
+   * node, then rather than copying the corresponding element of @p
+   * global_vector into the local vector, the constraints are resolved as the
+   * respective distribute function does, i.e., the local entry is constructed
+   * from the global entries to which this particular degree of freedom is
+   * constrained.
+   *
+   * In contrast to the similar function get_dof_values in the DoFAccessor
+   * class, this function does not need the constrained values to be correctly
+   * set (i.e., distribute to be called).
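+   *
+   * A minimal sketch (with illustrative names) that reads
+   * constraint-resolved local values from a global solution vector:
+   * @code
+   *   std::vector<double> local_values (local_dof_indices.size());
+   *   constraints.get_dof_values (solution,
+   *                               local_dof_indices.begin(),
+   *                               local_values.begin(),
+   *                               local_values.end());
+   * @endcode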
+   */
+  template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+            class VectorType>
+  void
+  get_dof_values (const VectorType  &global_vector,
+                  ForwardIteratorInd local_indices_begin,
+                  ForwardIteratorVec local_vector_begin,
+                  ForwardIteratorVec local_vector_end) const;
+
+  /**
+   * @}
+   */
+
+  /**
+   * @name Dealing with constraints after solving a linear system
+   * @{
+   */
+
+  /**
+   * Given a vector, set all constrained degrees of freedom to values so
+   * that the constraints are satisfied. For example, if the current object
+   * stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this
+   * function will read the values of $x_1$ and $x_2$ from the given vector
+   * and set the element $x_3$ according to this constraint. Similarly, if
+   * the current object stores the constraint $x_{42}=208$, then this
+   * function will set the 42nd element of the given vector to 208.
+   *
+   * @note If this function is called with a parallel vector @p vec, then the
+   * vector must not contain ghost elements.
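+   *
+   * A sketched sequence after solving a linear system (names are
+   * illustrative):
+   * @code
+   *   solver.solve (system_matrix, solution, system_rhs, preconditioner);
+   *   constraints.distribute (solution);  // make constrained entries consistent
+   * @endcode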
+   */
+  template <class VectorType>
+  void distribute (VectorType &vec) const;
+
+  /**
+   * @}
+   */
+
+
+
+  /**
+   * This class represents one line of a constraint matrix.
+   */
+  struct ConstraintLine
+  {
+    /**
+     * A data type in which we store the list of entries that make up the
+     * homogeneous part of a constraint.
+     */
+    typedef std::vector<std::pair<size_type,double> > Entries;
+
+    /**
+     * Global DoF index of this line. Since only very few lines are stored,
+     * we cannot assume a specific order and have to store the index
+     * explicitly.
+     */
+    size_type index;
+
+    /**
+     * Row numbers and values of the entries in this line.
+     *
+     * For the reason why we use a vector instead of a map and the
+     * consequences thereof, the same applies as what is said for
+     * AffineConstraints::lines.
+     */
+    Entries entries;
+
+    /**
+     * Value of the inhomogeneity.
+     */
+    double inhomogeneity;
+
+    /**
+     * This operator is a bit weird and unintuitive: it compares the line
+     * numbers of two lines. We need this to sort the lines; in fact we could
+     * do this using a comparison predicate.  However, this way, it is easier,
+     * albeit unintuitive since two lines really have no god-given order
+     * relation.
+     */
+    bool operator < (const ConstraintLine &) const;
+
+    /**
+     * This operator is likewise weird: it checks whether the line indices of
+     * the two operands are equal, irrespective of the fact that the contents
+     * of the line may be different.
+     */
+    bool operator == (const ConstraintLine &) const;
+
+    /**
+     * Determine an estimate for the memory consumption (in bytes) of this
+     * object.
+     */
+    std::size_t memory_consumption () const;
+
+    /**
+     * Support for boost::serialization.
+     */
+    template <class Archive>
+    void serialize(Archive &ar, const unsigned int)
+    {
+      ar &index &entries &inhomogeneity;
+    }
+
+  };
+
+
+  /**
+   * Typedef for the iterator type that is used in the LineRange container.
+   */
+  typedef std::vector<ConstraintLine>::const_iterator const_iterator;
+
+
+  /**
+   * Typedef for the return type used by get_lines().
+   */
+  typedef boost::iterator_range<const_iterator> LineRange;
+
+
+  /**
+   * Return a range object containing (const) iterators to all line entries
+   * stored in the AffineConstraints container. Such a range is useful to
+   * initialize range-based for loops as supported by C++11.
+   *
+   * @return A range object for the half open range <code>[this->begin(),
+   * this->end())</code> of line entries.
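+   *
+   * A minimal sketch of such a loop:
+   * @code
+   *   for (const auto &line : constraints.get_lines())
+   *     std::cout << "DoF " << line.index << " is constrained by "
+   *               << line.entries.size() << " entries." << std::endl;
+   * @endcode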
+   */
+  const LineRange get_lines() const;
+
+
+  /**
+   * Check if the current object is consistent on all processors
+   * in a distributed computation.
+   *
+   * This method checks if all processors agree on the constraints for their
+   * local lines as given by @p locally_active_dofs. This method is a collective
+   * operation and will return @p true only if all processors are consistent.
+   *
+   * Please supply the owned DoFs per processor as returned by
+   * DoFHandler::locally_owned_dofs_per_processor() as @p locally_owned_dofs
+   * and the result of DoFTools::extract_locally_active_dofs() as
+   * @p locally_active_dofs. The
+   * former is used to determine ownership of the specific DoF, while the latter
+   * is used as the set of rows that need to be checked.
+   *
+   * If @p verbose is set to @p true, additional debug information is written
+   * to std::cout.
+   *
+   * @note This method exchanges all constraint information of locally active
+   * lines and is as such slow for large computations and should probably
+   * only be used in debug mode. We do not check all lines returned by
+   * get_local_lines() but only the locally active ones, as we allow processors
+   * to not know about some locally relevant rows.
+   *
+   * @return Whether all AffineConstraints objects are consistent. Returns
+   * the same value on all processors.
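+   *
+   * A sketched call, assuming @p dof_handler and @p mpi_communicator have
+   * been set up appropriately:
+   * @code
+   *   IndexSet locally_active_dofs;
+   *   DoFTools::extract_locally_active_dofs (dof_handler, locally_active_dofs);
+   *   const bool consistent =
+   *     constraints.is_consistent_in_parallel (
+   *       dof_handler.locally_owned_dofs_per_processor(),
+   *       locally_active_dofs,
+   *       mpi_communicator);
+   * @endcode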
+   */
+  bool is_consistent_in_parallel(const std::vector<IndexSet> &locally_owned_dofs,
+                                 const IndexSet &locally_active_dofs,
+                                 const MPI_Comm mpi_communicator,
+                                 const bool verbose=false) const;
+
+
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException0 (ExcMatrixIsClosed);
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException0 (ExcMatrixNotClosed);
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException1 (ExcLineInexistant,
+                  size_type,
+                  << "The specified line " << arg1
+                  << " does not exist.");
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException4 (ExcEntryAlreadyExists,
+                  size_type, size_type, double, double,
+                  << "The entry for the indices " << arg1 << " and "
+                  << arg2 << " already exists, but the values "
+                  << arg3 << " (old) and " << arg4 << " (new) differ "
+                  << "by " << (arg4-arg3) << ".");
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException2 (ExcDoFConstrainedToConstrainedDoF,
+                  int, int,
+                  << "You tried to constrain DoF " << arg1
+                  << " to DoF " << arg2
+                  << ", but that one is also constrained. This is not allowed!");
+  /**
+   * Exception.
+   *
+   * @ingroup Exceptions
+   */
+  DeclException1 (ExcDoFIsConstrainedFromBothObjects,
+                  size_type,
+                  << "Degree of freedom " << arg1
+                  << " is constrained from both object in a merge operation.");
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException1 (ExcDoFIsConstrainedToConstrainedDoF,
+                  size_type,
+                  << "In the given argument a degree of freedom is constrained "
+                  << "to another DoF with number " << arg1
+                  << ", which however is constrained by this object. This is not"
+                  << " allowed.");
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException1 (ExcRowNotStoredHere,
+                  size_type,
+                  << "The index set given to this constraint matrix indicates "
+                  << "constraints for degree of freedom " << arg1
+                  << " should not be stored by this object, but a constraint "
+                  << "is being added.");
+
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException2 (ExcColumnNotStoredHere,
+                  size_type,
+                  size_type,
+                  << "The index set given to this constraint matrix indicates "
+                  << "constraints using degree of freedom " << arg2
+                  << " should not be stored by this object, but a constraint "
+                  << "for degree of freedom " << arg1 <<" uses it.");
+
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException2 (ExcIncorrectConstraint,
+                  int, int,
+                  << "While distributing the constraint for DoF "
+                  << arg1 << ", it turns out that one of the processors "
+                  << "who own the " << arg2
+                  << " degrees of freedom that x_" << arg1
+                  << " is constrained against does not know about "
+                  << "the constraint on x_" << arg1
+                  << ". Did you not initialize the AffineConstraints container "
+                  << "with the appropriate locally_relevant set so "
+                  << "that every processor who owns a DoF that constrains "
+                  << "another DoF also knows about this constraint?");
+
+private:
+
+  /**
+   * Store the lines of the matrix.  Entries are usually appended in an
+   * arbitrary order and insertion into a vector is done best at the end, so
+   * the order is unspecified after all entries are inserted. Sorting of the
+   * entries takes place when calling the <tt>close()</tt> function.
+   *
+   * We could, instead of using a vector, use an associative array, like a map
+   * to store the lines. This, however, would mean a much more fragmented heap
+   * since it allocates many small objects, and would additionally make usage
+   * of this matrix much slower.
+   */
+  std::vector<ConstraintLine> lines;
+
+  /**
+   * A list of size_type that contains the position of the ConstraintLine of a
+   * constrained degree of freedom, or numbers::invalid_size_type if the
+   * degree of freedom is not constrained. The value
+   * numbers::invalid_size_type thus indicates whether there is a constraint
+   * line for a given degree of freedom index. Note that this class has no
+   * notion of how many degrees of freedom there really are, so if we check
+   * whether there is a constraint line for a given degree of freedom, then
+   * this vector may actually be shorter than the index of the DoF we check
+   * for.
+   *
+   * This field exists since when adding a new constraint line we have to
+   * figure out whether it already exists. Previously, we would simply walk
+   * the unsorted list of constraint lines until we either hit the end or
+   * found it. This algorithm is O(N) if N is the number of constraints, which
+   * makes it O(N^2) when inserting all constraints. For large problems with
+   * many constraints, this could easily take 5-10 per cent of the total run
+   * time. With this field, we can save this time since we find any constraint
+   * in O(1) time, or learn that a certain degree of freedom is not
+   * constrained.
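+   *
+   * In code, the resulting lookup pattern is the one used by
+   * is_constrained() further down in this file:
+   * @code
+   *   const size_type line_index = calculate_line_index (index);
+   *   const bool constrained =
+   *     (line_index < lines_cache.size()) &&
+   *     (lines_cache[line_index] != numbers::invalid_size_type);
+   * @endcode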
+   *
+   * To make things worse, traversing the list of existing constraints
+   * requires reads from many different places in memory. Thus, in large 3d
+   * applications, the add_line() function showed up very prominently in the
+   * overall compute time, mainly because it generated a lot of cache misses.
+   * This should also be fixed by using the O(1) algorithm to access the
+   * fields of this array.
+   *
+   * The field is useful in a number of other contexts as well, e.g. when one
+   * needs random access to the constraints as in all the functions that apply
+   * constraints on the fly while adding cell contributions into vectors and
+   * matrices.
+   */
+  std::vector<size_type> lines_cache;
+
+  /**
+   * This IndexSet is used to limit the lines stored in this AffineConstraints
+   * object to a subset. This is necessary because the lines_cache vector
+   * would otherwise become too big in a distributed calculation.
+   */
+  IndexSet local_lines;
+
+  /**
+   * Store whether the arrays are sorted.  If so, no new entries can be added.
+   */
+  bool sorted;
+
+  /**
+   * Internal function to calculate the index of line @p line in the vector
+   * lines_cache using local_lines.
+   */
+  size_type calculate_line_index (const size_type line) const;
+
+  /**
+   * Return @p true if the weight of an entry (the second element of the pair)
+   * equals zero. This function is used to delete entries with zero weight.
+   */
+  static bool check_zero_weight (const std::pair<size_type, double> &p);
+
+  /**
+   * Dummy table that serves as default argument for function
+   * <tt>add_entries_local_to_global()</tt>.
+   */
+  static const Table<2,bool> default_empty_table;
+
+  /**
+   * This function actually implements the local_to_global function for
+   * standard (non-block) matrices.
+   */
+  template <typename MatrixType, typename VectorType>
+  void
+  distribute_local_to_global (const FullMatrix<typename MatrixType::value_type>  &local_matrix,
+                              const Vector<typename VectorType::value_type>      &local_vector,
+                              const std::vector<size_type> &local_dof_indices,
+                              MatrixType                   &global_matrix,
+                              VectorType                   &global_vector,
+                              bool                          use_inhomogeneities_for_rhs,
+                              std::integral_constant<bool, false>) const;
+
+  /**
+   * This function actually implements the local_to_global function for block
+   * matrices.
+   */
+  template <typename MatrixType, typename VectorType>
+  void
+  distribute_local_to_global (const FullMatrix<typename MatrixType::value_type>  &local_matrix,
+                              const Vector<typename VectorType::value_type>      &local_vector,
+                              const std::vector<size_type> &local_dof_indices,
+                              MatrixType                   &global_matrix,
+                              VectorType                   &global_vector,
+                              bool                          use_inhomogeneities_for_rhs,
+                              std::integral_constant<bool, true>) const;
+
+  /**
+   * This function actually implements the local_to_global function for
+   * standard (non-block) sparsity types.
+   */
+  template <typename SparsityPatternType>
+  void
+  add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+                               SparsityPatternType          &sparsity_pattern,
+                               const bool                    keep_constrained_entries,
+                               const Table<2,bool>          &dof_mask,
+                               std::integral_constant<bool, false>) const;
+
+  /**
+   * This function actually implements the local_to_global function for block
+   * sparsity types.
+   */
+  template <typename SparsityPatternType>
+  void
+  add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+                               SparsityPatternType          &sparsity_pattern,
+                               const bool                    keep_constrained_entries,
+                               const Table<2,bool>          &dof_mask,
+                               std::integral_constant<bool, true>) const;
+
+  /**
+   * Internal helper function for distribute_local_to_global function.
+   *
+   * Creates a list of affected global rows for distribution, including the
+   * local rows where the entries come from. The list is sorted according to
+   * the global row indices.
+   */
+  void
+  make_sorted_row_list (const std::vector<size_type>   &local_dof_indices,
+                        internals::GlobalRowsFromLocal &global_rows) const;
+
+  /**
+   * Internal helper function for add_entries_local_to_global function.
+   *
+   * Creates a list of affected rows for distribution without any additional
+   * information, otherwise similar to the other make_sorted_row_list()
+   * function.
+   */
+  void
+  make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
+                        std::vector<size_type>       &active_dofs) const;
+
+  /**
+   * Internal helper function for distribute_local_to_global function.
+   */
+  template <typename MatrixScalar, typename VectorScalar>
+  typename ProductType<VectorScalar,MatrixScalar>::type
+  resolve_vector_entry (const size_type                       i,
+                        const internals::GlobalRowsFromLocal &global_rows,
+                        const Vector<VectorScalar>           &local_vector,
+                        const std::vector<size_type>         &local_dof_indices,
+                        const FullMatrix<MatrixScalar>       &local_matrix) const;
+};
+
+
+
+/* ---------------- template and inline functions ----------------- */
+
+inline
+AffineConstraints::AffineConstraints (const IndexSet &local_constraints)
+  :
+  lines (),
+  local_lines (local_constraints),
+  sorted (false)
+{
+  // make sure the IndexSet is compressed. Otherwise this can lead to crashes
+  // that are hard to find (only happen in release mode).
+  // see tests/mpi/affine_constraints_crash_01
+  local_lines.compress();
+}
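+
+// In a parallel computation one would typically construct this object with
+// the locally relevant degrees of freedom, along the lines of the following
+// sketch (the DoFHandler object dof_handler is an assumption):
+//
+//   IndexSet locally_relevant_dofs;
+//   DoFTools::extract_locally_relevant_dofs (dof_handler,
+//                                            locally_relevant_dofs);
+//   AffineConstraints constraints (locally_relevant_dofs);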
+
+
+
+inline
+AffineConstraints::AffineConstraints (const AffineConstraints &affine_constraints)
+  :
+  Subscriptor (),
+  lines (affine_constraints.lines),
+  lines_cache (affine_constraints.lines_cache),
+  local_lines (affine_constraints.local_lines),
+  sorted (affine_constraints.sorted)
+{}
+
+
+inline
+void
+AffineConstraints::add_line (const size_type line)
+{
+  Assert (sorted==false, ExcMatrixIsClosed());
+
+  // the following can happen when we compute with distributed meshes and dof
+  // handlers and we constrain a degree of freedom whose number we don't have
+  // locally. if we don't abort here the program will try to allocate several
+  // terabytes of memory to resize the various arrays below :-)
+  Assert (line != numbers::invalid_size_type,
+          ExcInternalError());
+  const size_type line_index = calculate_line_index (line);
+
+  // check whether line already exists; it may, in which case we can just quit
+  if (is_constrained(line))
+    return;
+
+  // if necessary enlarge vector of existing entries for cache
+  if (line_index >= lines_cache.size())
+    lines_cache.resize (std::max(2*static_cast<size_type>(lines_cache.size()),
+                                 line_index+1),
+                        numbers::invalid_size_type);
+
+  // push a new line to the end of the list
+  lines.emplace_back ();
+  lines.back().index = line;
+  lines.back().inhomogeneity = 0.;
+  lines_cache[line_index] = lines.size()-1;
+}
+
+
+
+inline
+void
+AffineConstraints::add_entry (const size_type line,
+                              const size_type column,
+                              const double    value)
+{
+  Assert (sorted==false, ExcMatrixIsClosed());
+  Assert (line != column,
+          ExcMessage ("Can't constrain a degree of freedom to itself"));
+
+  // Ensure that the current line is present in the cache:
+  const size_type line_index = calculate_line_index(line);
+  Assert (line_index < lines_cache.size(),
+          ExcMessage("The current AffineConstraints does not contain the line "
+                     "for the current entry. Call AffineConstraints::add_line "
+                     "before calling this function."));
+
+  // if in debug mode, check whether an entry for this column already exists
+  // and if it's the same as the one entered at present
+  //
+  // in any case: exit the function if an entry for this column already
+  // exists, since we don't want to enter it twice
+  Assert (lines_cache[line_index] != numbers::invalid_size_type,
+          ExcInternalError());
+  Assert (!local_lines.size() || local_lines.is_element(column),
+          ExcColumnNotStoredHere(line, column));
+  ConstraintLine *line_ptr = &lines[lines_cache[line_index]];
+  Assert (line_ptr->index == line, ExcInternalError());
+  for (ConstraintLine::Entries::const_iterator
+       p=line_ptr->entries.begin();
+       p != line_ptr->entries.end(); ++p)
+    if (p->first == column)
+      {
+        Assert (std::fabs(p->second - value) < 1.e-14,
+                ExcEntryAlreadyExists(line, column, p->second, value));
+        return;
+      }
+
+  line_ptr->entries.emplace_back (column, value);
+}
+
+
+
+inline
+void
+AffineConstraints::set_inhomogeneity (const size_type line,
+                                      const double    value)
+{
+  const size_type line_index = calculate_line_index(line);
+  Assert( line_index < lines_cache.size() &&
+          lines_cache[line_index] != numbers::invalid_size_type,
+          ExcMessage("call add_line() before calling set_inhomogeneity()"));
+  Assert(lines_cache[line_index] < lines.size(), ExcInternalError());
+  ConstraintLine *line_ptr = &lines[lines_cache[line_index]];
+  line_ptr->inhomogeneity = value;
+}
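+
+// Taken together, add_line(), add_entry(), and set_inhomogeneity() are how
+// a complete constraint is entered. For example, the (hypothetical)
+// constraint x_3 = 0.5 * x_7 + 1.0 would be written as:
+//
+//   constraints.add_line (3);
+//   constraints.add_entry (3, 7, 0.5);
+//   constraints.set_inhomogeneity (3, 1.0);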
+
+
+
+inline
+types::global_dof_index
+AffineConstraints::n_constraints () const
+{
+  return lines.size();
+}
+
+
+
+inline
+bool
+AffineConstraints::is_constrained (const size_type index) const
+{
+  const size_type line_index = calculate_line_index(index);
+  return ((line_index < lines_cache.size())
+          &&
+          (lines_cache[line_index] != numbers::invalid_size_type));
+}
+
+
+
+inline
+bool
+AffineConstraints::is_inhomogeneously_constrained (const size_type index) const
+{
+  // check whether the entry is constrained. could use is_constrained, but
+  // that means computing the line index twice
+  const size_type line_index = calculate_line_index(index);
+  if (line_index >= lines_cache.size() ||
+      lines_cache[line_index] == numbers::invalid_size_type)
+    return false;
+  else
+    {
+      Assert(lines_cache[line_index] < lines.size(), ExcInternalError());
+      return lines[lines_cache[line_index]].inhomogeneity != 0;
+    }
+}
+
+
+
+inline
+const std::vector<std::pair<types::global_dof_index,double> > *
+AffineConstraints::get_constraint_entries (const size_type line) const
+{
+  // check whether the entry is constrained. could use is_constrained, but
+  // that means computing the line index twice
+  const size_type line_index = calculate_line_index(line);
+  if (line_index >= lines_cache.size() ||
+      lines_cache[line_index] == numbers::invalid_size_type)
+    return nullptr;
+  else
+    return &lines[lines_cache[line_index]].entries;
+}
+
+
+
+inline
+double
+AffineConstraints::get_inhomogeneity (const size_type line) const
+{
+  // check whether the entry is constrained. could use is_constrained, but
+  // that means computing the line index twice
+  const size_type line_index = calculate_line_index(line);
+  if (line_index >= lines_cache.size() ||
+      lines_cache[line_index] == numbers::invalid_size_type)
+    return 0;
+  else
+    return lines[lines_cache[line_index]].inhomogeneity;
+}
+
+
+
+inline types::global_dof_index
+AffineConstraints::calculate_line_index (const size_type line) const
+{
+  // IndexSet is unused (serial case)
+  if (!local_lines.size())
+    return line;
+
+  Assert(local_lines.is_element(line),
+         ExcRowNotStoredHere(line));
+
+  return local_lines.index_within_set(line);
+}
+
+
+
+inline bool
+AffineConstraints::can_store_line (size_type line_index) const
+{
+  return !local_lines.size() || local_lines.is_element(line_index);
+}
+
+
+
+inline
+const IndexSet &
+AffineConstraints::get_local_lines () const
+{
+  return local_lines;
+}
+
+
+
+template <class VectorType>
+inline
+void AffineConstraints::distribute_local_to_global (
+  const size_type index,
+  const double    value,
+  VectorType     &global_vector) const
+{
+  Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
+
+  if (is_constrained(index) == false)
+    global_vector(index) += value;
+  else
+    {
+      const ConstraintLine &position =
+        lines[lines_cache[calculate_line_index(index)]];
+      for (size_type j=0; j<position.entries.size(); ++j)
+        global_vector(position.entries[j].first)
+        += value * position.entries[j].second;
+    }
+}
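+
+// For example, adding a single value to a right hand side vector while
+// resolving constraints could look like this sketch (dof_index and
+// system_rhs are assumptions):
+//
+//   constraints.distribute_local_to_global (dof_index, 1.0, system_rhs);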
+
+
+template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+          class VectorType>
+inline
+void AffineConstraints::distribute_local_to_global (
+  ForwardIteratorVec local_vector_begin,
+  ForwardIteratorVec local_vector_end,
+  ForwardIteratorInd local_indices_begin,
+  VectorType        &global_vector) const
+{
+  Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
+  for ( ; local_vector_begin != local_vector_end;
+        ++local_vector_begin, ++local_indices_begin)
+    {
+      if (is_constrained(*local_indices_begin) == false)
+        internal::ElementAccess<VectorType>::add(*local_vector_begin,
+                                                 *local_indices_begin, global_vector);
+      else
+        {
+          const ConstraintLine &position =
+            lines[lines_cache[calculate_line_index(*local_indices_begin)]];
+          for (size_type j=0; j<position.entries.size(); ++j)
+            internal::ElementAccess<VectorType>::add((*local_vector_begin) * position.entries[j].second,
+                                                     position.entries[j].first,
+                                                     global_vector);
+        }
+    }
+}
+
+
+template <class InVector, class OutVector>
+inline
+void
+AffineConstraints::distribute_local_to_global (
+  const InVector               &local_vector,
+  const std::vector<size_type> &local_dof_indices,
+  OutVector                    &global_vector) const
+{
+  Assert (local_vector.size() == local_dof_indices.size(),
+          ExcDimensionMismatch(local_vector.size(), local_dof_indices.size()));
+  distribute_local_to_global (local_vector.begin(), local_vector.end(),
+                              local_dof_indices.begin(), global_vector);
+}
+
+
+
+template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+          class VectorType>
+inline
+void AffineConstraints::get_dof_values (const VectorType  &global_vector,
+                                       ForwardIteratorInd local_indices_begin,
+                                       ForwardIteratorVec local_vector_begin,
+                                       ForwardIteratorVec local_vector_end) const
+{
+  Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
+  for ( ; local_vector_begin != local_vector_end;
+        ++local_vector_begin, ++local_indices_begin)
+    {
+      if (is_constrained(*local_indices_begin) == false)
+        *local_vector_begin = global_vector(*local_indices_begin);
+      else
+        {
+          const ConstraintLine &position =
+            lines[lines_cache[calculate_line_index(*local_indices_begin)]];
+          typename VectorType::value_type value = position.inhomogeneity;
+          for (size_type j=0; j<position.entries.size(); ++j)
+            value += (global_vector(position.entries[j].first) *
+                      position.entries[j].second);
+          *local_vector_begin = value;
+        }
+    }
+}
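+
+// For example, to extract the constraint-resolved values of the degrees of
+// freedom of one cell (a sketch; solution and local_dof_indices are
+// assumptions):
+//
+//   std::vector<double> local_values (local_dof_indices.size());
+//   constraints.get_dof_values (solution,
+//                               local_dof_indices.begin(),
+//                               local_values.begin(),
+//                               local_values.end());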
+
+
+template <typename MatrixType> class BlockMatrixBase;
+template <typename SparsityPatternType> class BlockSparsityPatternBase;
+template <typename number>     class BlockSparseMatrixEZ;
+
+/**
+ * A class that can be used to determine whether a given type is a block
+ * matrix type or not. For example,
+ * @code
+ *   IsBlockMatrix<SparseMatrix<double> >::value
+ * @endcode
+ * has the value false, whereas
+ * @code
+ *   IsBlockMatrix<BlockSparseMatrix<double> >::value
+ * @endcode
+ * is true. This is sometimes useful in template contexts where we may want to
+ * do things differently depending on whether a template type denotes a
+ * regular or a block matrix type.
+ *
+ * @see
+ * @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2009
+ */
+template <typename MatrixType>
+struct IsBlockMatrix
+{
+private:
+  struct yes_type
+  {
+    char c[1];
+  };
+  struct no_type
+  {
+    char c[2];
+  };
+
+  /**
+   * Overload returning true if the class is derived from BlockMatrixBase,
+   * which is what block matrices do (with the exception of
+   * BlockSparseMatrixEZ).
+   */
+  template <typename T>
+  static yes_type check_for_block_matrix (const BlockMatrixBase<T> *);
+
+  /**
+   * Overload returning true if the class is derived from
+   * BlockSparsityPatternBase, which is what block sparsity patterns do.
+   */
+  template <typename T>
+  static yes_type check_for_block_matrix (const BlockSparsityPatternBase<T> *);
+
+  /**
+   * Overload for BlockSparseMatrixEZ, which is the only block matrix not
+   * derived from BlockMatrixBase at the time of writing this class.
+   */
+  template <typename T>
+  static yes_type check_for_block_matrix (const BlockSparseMatrixEZ<T> *);
+
+  /**
+   * Catch all for all other potential matrix types that are not block
+   * matrices.
+   */
+  static no_type check_for_block_matrix (...);
+
+public:
+  /**
+   * A statically computable value that indicates whether the template
+   * argument to this class is a block matrix (in fact whether the type is
+   * derived from BlockMatrixBase<T>).
+   */
+  static const bool value = (sizeof(check_for_block_matrix
+                                    ((MatrixType *)nullptr))
+                             ==
+                             sizeof(yes_type));
+};
+
+
+// instantiation of the static member
+template <typename MatrixType>
+const bool IsBlockMatrix<MatrixType>::value;
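+
+// The constant defined above is typically turned into a type via
+// std::integral_constant for tag dispatch; the functions below pass
+//
+//   std::integral_constant<bool, IsBlockMatrix<MatrixType>::value>()
+//
+// to select between the block and non-block implementations.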
+
+
+template <typename MatrixType>
+inline
+void
+AffineConstraints::
+distribute_local_to_global (const FullMatrix<typename MatrixType::value_type>     &local_matrix,
+                            const std::vector<size_type> &local_dof_indices,
+                            MatrixType                   &global_matrix) const
+{
+  // create a dummy and hand on to the function actually implementing this
+  // feature in the affine_constraints.templates.h file.
+  Vector<typename MatrixType::value_type> dummy(0);
+  distribute_local_to_global (local_matrix, dummy, local_dof_indices,
+                              global_matrix, dummy, false,
+                              std::integral_constant<bool, IsBlockMatrix<MatrixType>::value>());
+}
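+
+// During assembly, the matrix-only variant above is used as follows (a
+// sketch; cell_matrix, local_dof_indices, and system_matrix are
+// assumptions):
+//
+//   constraints.distribute_local_to_global (cell_matrix,
+//                                           local_dof_indices,
+//                                           system_matrix);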
+
+
+
+
+template <typename MatrixType, typename VectorType>
+inline
+void
+AffineConstraints::
+distribute_local_to_global (const FullMatrix<typename MatrixType::value_type>     &local_matrix,
+                            const Vector<typename VectorType::value_type>         &local_vector,
+                            const std::vector<size_type> &local_dof_indices,
+                            MatrixType                   &global_matrix,
+                            VectorType                   &global_vector,
+                            bool                          use_inhomogeneities_for_rhs) const
+{
+  // enter the internal function with the respective block information set,
+  // the actual implementation follows in the affine_constraints.templates.h
+  // file.
+  distribute_local_to_global (local_matrix, local_vector, local_dof_indices,
+                              global_matrix, global_vector, use_inhomogeneities_for_rhs,
+                              std::integral_constant<bool, IsBlockMatrix<MatrixType>::value>());
+}
+
+
+
+
+template <typename SparsityPatternType>
+inline
+void
+AffineConstraints::
+add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+                             SparsityPatternType          &sparsity_pattern,
+                             const bool                    keep_constrained_entries,
+                             const Table<2,bool>          &dof_mask) const
+{
+  // enter the internal function with the respective block information set,
+  // the actual implementation follows in the affine_constraints.templates.h
+  // file.
+  add_entries_local_to_global (local_dof_indices, sparsity_pattern,
+                               keep_constrained_entries, dof_mask,
+                               std::integral_constant<bool, IsBlockMatrix<SparsityPatternType>::value>());
+}
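+
+// A typical sparsity pattern setup would call the function above once per
+// cell (a sketch; dsp is an assumed DynamicSparsityPattern):
+//
+//   constraints.add_entries_local_to_global (local_dof_indices, dsp,
+//                                            /*keep_constrained_entries=*/false);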
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h
new file mode 100644 (file)
index 0000000..db9c1e2
--- /dev/null
@@ -0,0 +1,2853 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1999 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_affine_constraints_templates_h
+#define dealii_affine_constraints_templates_h
+
+
+#include <deal.II/lac/affine_constraints.h>
+
+#include <deal.II/base/table.h>
+#include <deal.II/base/thread_local_storage.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
+#include <deal.II/lac/block_sparse_matrix.h>
+
+#include <deal.II/lac/la_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
+
+#include <complex>
+#include <iomanip>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+template <typename number>
+void
+AffineConstraints::condense (SparseMatrix<number> &uncondensed) const
+{
+  Vector<number> dummy (0);
+  condense (uncondensed, dummy);
+}
+
+
+
+template <typename number>
+void
+AffineConstraints::condense (BlockSparseMatrix<number> &uncondensed) const
+{
+  BlockVector<number> dummy (0);
+  condense (uncondensed, dummy);
+}
+
+
+
+template <class VectorType>
+void
+AffineConstraints::condense (const VectorType &vec_ghosted,
+                             VectorType       &vec) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+
+  // if this is called with different arguments, we need to copy the data over:
+  if (&vec != &vec_ghosted)
+    vec = vec_ghosted;
+
+  // distribute all entries, and set them to zero. do so in
+  // two loops because in the first one we need to add to elements
+  // and in the second one we need to set elements to zero. for
+  // parallel vectors, this can only work if we can put a compress()
+  // in between, but we don't want to call compress() twice per entry
+  for (std::vector<ConstraintLine>::const_iterator
+       constraint_line = lines.begin();
+       constraint_line!=lines.end(); ++constraint_line)
+    {
+      // in case the constraint is inhomogeneous, this function is not
+      // appropriate. Throw an exception.
+      Assert (constraint_line->inhomogeneity == 0.,
+              ExcMessage ("Inhomogeneous constraint cannot be condensed "
+                          "without any matrix specified."));
+
+      const typename VectorType::value_type old_value = vec_ghosted(constraint_line->index);
+      for (size_type q=0; q!=constraint_line->entries.size(); ++q)
+        if (vec.in_local_range(constraint_line->entries[q].first) == true)
+          vec(constraint_line->entries[q].first)
+          += (static_cast<typename VectorType::value_type>
+              (old_value) *
+              constraint_line->entries[q].second);
+    }
+
+  vec.compress(VectorOperation::add);
+
+  for (std::vector<ConstraintLine>::const_iterator
+       constraint_line = lines.begin();
+       constraint_line!=lines.end(); ++constraint_line)
+    if (vec.in_local_range(constraint_line->index) == true)
+      vec(constraint_line->index) = 0.;
+
+  vec.compress(VectorOperation::insert);
+}
+
+
+
+template <class VectorType>
+void
+AffineConstraints::condense (VectorType &vec) const
+{
+  condense(vec, vec);
+}
+
+
+
+template <typename number, class VectorType>
+void
+AffineConstraints::condense (SparseMatrix<number> &uncondensed,
+                             VectorType           &vec) const
+{
+  // check whether we work on real vectors or we just used a dummy when
+  // calling the other function above.
+  const bool use_vectors = (vec.size() != 0);
+
+  const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern ();
+
+  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
+  Assert (sparsity.n_rows() == sparsity.n_cols(),
+          ExcNotQuadratic());
+  if (use_vectors == true)
+    AssertDimension (vec.size(), sparsity.n_rows());
+
+  double average_diagonal = 0;
+  for (size_type i=0; i<uncondensed.m(); ++i)
+    average_diagonal += std::abs (uncondensed.diag_element(i));
+  average_diagonal /= uncondensed.m();
+
+  // store for each index whether it must be distributed or not. If the
+  // entry is invalid_size_type, no distribution is necessary. otherwise,
+  // the number states which constraint line handles this index
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);
+
+  for (size_type c=0; c<lines.size(); ++c)
+    distribute[lines[c].index] = c;
+
+  const size_type n_rows = sparsity.n_rows();
+  for (size_type row=0; row<n_rows; ++row)
+    {
+      if (distribute[row] == numbers::invalid_size_type)
+        // regular line. loop over cols
+        {
+          for (typename SparseMatrix<number>::iterator
+               entry = uncondensed.begin(row);
+               entry != uncondensed.end(row); ++entry)
+            {
+              const size_type column = entry->column();
+
+              // end of row reached? this should not happen, since we
+              // only operate on compressed matrices!
+              Assert (column != SparsityPattern::invalid_entry,
+                      ExcMatrixNotClosed());
+
+              if (distribute[column] != numbers::invalid_size_type)
+                // distribute entry at regular row @p row and irregular
+                // column; set old entry to zero
+                {
+                  for (size_type q=0;
+                       q!=lines[distribute[column]].entries.size(); ++q)
+                    {
+                      // need a temporary variable to avoid errors like
+                      // no known conversion from 'complex<typename ProductType<float, double>::type>' to 'const complex<float>' for 3rd argument
+                      number v = static_cast<number>(entry->value());
+                      v *=lines[distribute[column]].entries[q].second;
+                      uncondensed.add (row,
+                                       lines[distribute[column]].entries[q].first,
+                                       v);
+                    }
+
+                  // need to subtract this element from the
+                  // vector. this corresponds to an
+                  // explicit elimination in the respective
+                  // row of the inhomogeneous constraint in
+                  // the matrix with Gauss elimination
+                  if (use_vectors == true)
+                    vec(row) -=
+                      static_cast<number>(entry->value()) * lines[distribute[column]].inhomogeneity;
+
+                  // set old value to zero
+                  entry->value() = 0.;
+                }
+            }
+        }
+      else
+        // row must be distributed
+        {
+          for (typename SparseMatrix<number>::iterator
+               entry = uncondensed.begin(row);
+               entry != uncondensed.end(row); ++entry)
+            {
+              const size_type column = entry->column();
+
+              // end of row reached? this should not happen, since we
+              // only operate on compressed matrices!
+              Assert (column != SparsityPattern::invalid_entry,
+                      ExcMatrixNotClosed());
+
+              if (distribute[column] == numbers::invalid_size_type)
+                // distribute entry at irregular row @p row and regular
+                // column; set old entry to zero
+                {
+                  for (size_type q=0;
+                       q!=lines[distribute[row]].entries.size(); ++q)
+                    {
+                      // need a temporary variable to avoid errors like
+                      // no known conversion from 'complex<typename ProductType<float, double>::type>' to 'const complex<float>' for 3rd argument
+                      number v = static_cast<number>(entry->value());
+                      v *= lines[distribute[row]].entries[q].second;
+                      uncondensed.add (lines[distribute[row]].entries[q].first,
+                                       column,
+                                       v);
+                    }
+
+                  // set old entry to zero
+                  entry->value() = 0.;
+                }
+              else
+                // distribute entry at irregular row @p row and irregular
+                // column @p column; set old entry to the average diagonal
+                // value on the main diagonal, zero otherwise
+                {
+                  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+                    {
+                      for (size_type q=0;
+                           q!=lines[distribute[column]].entries.size(); ++q)
+                        {
+                          // need a temporary variable to avoid errors like
+                          // no known conversion from 'complex<typename ProductType<float, double>::type>' to 'const complex<float>' for 3rd argument
+                          number v = static_cast<number>(entry->value());
+                          v *= lines[distribute[row]].entries[p].second *
+                               lines[distribute[column]].entries[q].second;
+                          uncondensed.add (lines[distribute[row]].entries[p].first,
+                                           lines[distribute[column]].entries[q].first,
+                                           v);
+                        }
+
+                      if (use_vectors == true)
+                        vec(lines[distribute[row]].entries[p].first) -=
+                          static_cast<number>(entry->value()) * lines[distribute[row]].entries[p].second *
+                          lines[distribute[column]].inhomogeneity;
+                    }
+
+                  // set old entry to correct value
+                  entry->value() = (row == column ? average_diagonal : 0. );
+                }
+            }
+
+          // take care of vector
+          if (use_vectors == true)
+            {
+              for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
+                vec(lines[distribute[row]].entries[q].first)
+                += (vec(row) * lines[distribute[row]].entries[q].second);
+
+              vec(lines[distribute[row]].index) = 0.;
+            }
+        }
+    }
+}
+
+
+
+template <typename number, class BlockVectorType>
+void
+AffineConstraints::condense (BlockSparseMatrix<number> &uncondensed,
+                             BlockVectorType           &vec) const
+{
+  // check whether we work on real vectors or we just used a dummy when
+  // calling the other function above.
+  const bool use_vectors = (vec.n_blocks() != 0);
+
+  const size_type blocks = uncondensed.n_block_rows();
+
+  const BlockSparsityPattern &
+  sparsity = uncondensed.get_sparsity_pattern ();
+
+  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed());
+  Assert (sparsity.n_rows() == sparsity.n_cols(),
+          ExcNotQuadratic());
+  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
+          ExcNotQuadratic());
+  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
+          ExcNotQuadratic());
+
+  if (use_vectors == true)
+    {
+      AssertDimension (vec.size(), sparsity.n_rows());
+      AssertDimension (vec.n_blocks(), sparsity.n_block_rows());
+    }
+
+  double average_diagonal = 0;
+  for (size_type b=0; b<uncondensed.n_block_rows(); ++b)
+    for (size_type i=0; i<uncondensed.block(b,b).m(); ++i)
+      average_diagonal += std::fabs (uncondensed.block(b,b).diag_element(i));
+  average_diagonal /= uncondensed.m();
+
+  const BlockIndices &
+  index_mapping = sparsity.get_column_indices();
+
+  // store for each index whether it must be distributed or not. If the
+  // entry is numbers::invalid_size_type, no distribution is necessary.
+  // otherwise, the number states which constraint line handles this index
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);
+
+  for (size_type c=0; c<lines.size(); ++c)
+    distribute[lines[c].index] = c;
+
+  const size_type n_rows = sparsity.n_rows();
+  for (size_type row=0; row<n_rows; ++row)
+    {
+      // get index of this row within the blocks
+      const std::pair<size_type,size_type>
+      block_index = index_mapping.global_to_local(row);
+      const size_type block_row = block_index.first;
+
+      if (distribute[row] == numbers::invalid_size_type)
+        // regular line. loop over all columns and see whether this
+        // column must be distributed
+        {
+          // to loop over all entries in this row, we have to loop over
+          // all blocks in this block row and the corresponding row therein
+          for (size_type block_col=0; block_col<blocks; ++block_col)
+            {
+              for (typename SparseMatrix<number>::iterator
+                   entry = uncondensed.block(block_row, block_col).begin(block_index.second);
+                   entry != uncondensed.block(block_row, block_col).end(block_index.second);
+                   ++entry)
+                {
+                  const size_type global_col
+                    = index_mapping.local_to_global(block_col,entry->column());
+
+                  if (distribute[global_col] != numbers::invalid_size_type)
+                    // distribute entry at regular row @p row and
+                    // irregular column global_col; set old entry to zero
+                    {
+                      const double old_value = entry->value ();
+
+                      for (size_type q=0;
+                           q!=lines[distribute[global_col]].entries.size(); ++q)
+                        uncondensed.add (row,
+                                         lines[distribute[global_col]].entries[q].first,
+                                         old_value *
+                                         lines[distribute[global_col]].entries[q].second);
+
+                      // need to subtract this element from the
+                      // vector. this corresponds to an
+                      // explicit elimination in the respective
+                      // row of the inhomogeneous constraint in
+                      // the matrix with Gauss elimination
+                      if (use_vectors == true)
+                        vec(row) -= entry->value() *
+                                    lines[distribute[global_col]].inhomogeneity;
+
+                      entry->value() = 0.;
+                    }
+                }
+            }
+        }
+      else
+        {
+          // row must be distributed. split the whole row into the
+          // chunks defined by the blocks
+          for (size_type block_col=0; block_col<blocks; ++block_col)
+            {
+              for (typename SparseMatrix<number>::iterator
+                   entry = uncondensed.block(block_row, block_col).begin(block_index.second);
+                   entry != uncondensed.block(block_row, block_col).end(block_index.second);
+                   ++entry)
+                {
+                  const size_type global_col
+                    = index_mapping.local_to_global (block_col, entry->column());
+
+                  if (distribute[global_col] ==
+                      numbers::invalid_size_type)
+                    // distribute entry at irregular row @p row and
+                    // regular column global_col; set old entry to zero
+                    {
+                      const double old_value = entry->value();
+
+                      for (size_type q=0;
+                           q!=lines[distribute[row]].entries.size(); ++q)
+                        uncondensed.add (lines[distribute[row]].entries[q].first,
+                                         global_col,
+                                         old_value *
+                                         lines[distribute[row]].entries[q].second);
+
+                      entry->value() = 0.;
+                    }
+                  else
+                    // distribute entry at irregular row @p row and
+                    // irregular column @p global_col; set old entry to
+                    // the average diagonal value if on the main diagonal,
+                    // zero otherwise
+                    {
+                      const double old_value = entry->value ();
+
+                      for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+                        {
+                          for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+                            uncondensed.add (lines[distribute[row]].entries[p].first,
+                                             lines[distribute[global_col]].entries[q].first,
+                                             old_value *
+                                             lines[distribute[row]].entries[p].second *
+                                             lines[distribute[global_col]].entries[q].second);
+
+                          if (use_vectors == true)
+                            vec(lines[distribute[row]].entries[p].first) -=
+                              old_value * lines[distribute[row]].entries[p].second *
+                              lines[distribute[global_col]].inhomogeneity;
+                        }
+
+                      entry->value() = (row == global_col ? average_diagonal : 0. );
+                    }
+                }
+            }
+
+          // take care of vector
+          if (use_vectors == true)
+            {
+              for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
+                vec(lines[distribute[row]].entries[q].first)
+                += (vec(row) * lines[distribute[row]].entries[q].second);
+
+              vec(lines[distribute[row]].index) = 0.;
+            }
+        }
+    }
+}
+
+
+// TODO: I'm sure the following could be made more elegant by using a bit
+// of introspection via static member variables of the various vector
+// classes to dispatch between the different functions, rather than
+// relying on knowledge of the individual types
+
+// a number of helper functions to select the right implementation of
+// set_zero().
+namespace internal
+{
+  namespace AffineConstraintsImplementation
+  {
+    namespace
+    {
+      typedef types::global_dof_index size_type;
+
+      template <class VectorType>
+      void set_zero_parallel(const std::vector<size_type> &cm,
+                             VectorType &vec,
+                             size_type shift = 0)
+      {
+        Assert(!vec.has_ghost_elements(), ExcInternalError());
+        IndexSet locally_owned = vec.locally_owned_elements();
+        for (typename std::vector<size_type>::const_iterator it = cm.begin();
+             it != cm.end(); ++it)
+          {
+            // If shift>0 then we are working on a part of a BlockVector
+            // so vec(i) is actually the global entry i+shift.
+            // We first make sure the line falls into the range of vec,
+            // then check if it is part of the local part of the vector, before
+            // finally setting it to 0.
+            if ((*it)<shift)
+              continue;
+            size_type idx = *it - shift;
+            if (idx<vec.size() && locally_owned.is_element(idx))
+              internal::ElementAccess<VectorType>::set(0., idx, vec);
+          }
+      }
+
+      template <typename Number>
+      void set_zero_parallel(const std::vector<size_type>               &cm,
+                             LinearAlgebra::distributed::Vector<Number> &vec,
+                             size_type                                   shift = 0)
+      {
+        for (typename std::vector<size_type>::const_iterator it = cm.begin();
+             it != cm.end(); ++it)
+          {
+            // If shift>0 then we are working on a part of a BlockVector
+            // so vec(i) is actually the global entry i+shift.
+            // We first make sure the line falls into the range of vec,
+            // then check if it is part of the local part of the vector, before
+            // finally setting it to 0.
+            if ((*it)<shift)
+              continue;
+            size_type idx = *it - shift;
+            if (vec.in_local_range(idx))
+              vec(idx) = 0.;
+          }
+        vec.zero_out_ghosts();
+      }
+
+      template <class VectorType>
+      void set_zero_in_parallel(const std::vector<size_type> &cm,
+                                VectorType                   &vec,
+                                std::integral_constant<bool, false>)
+      {
+        set_zero_parallel(cm, vec, 0);
+      }
+
+      // in parallel for BlockVectors
+      template <class VectorType>
+      void set_zero_in_parallel(const std::vector<size_type> &cm,
+                                VectorType                   &vec,
+                                std::integral_constant<bool, true>)
+      {
+        size_type start_shift = 0;
+        for (size_type j=0; j<vec.n_blocks(); ++j)
+          {
+            set_zero_parallel(cm, vec.block(j), start_shift);
+            start_shift += vec.block(j).size();
+          }
+      }
+
+      template <class VectorType>
+      void set_zero_serial(const std::vector<size_type> &cm,
+                           VectorType                   &vec)
+      {
+        for (typename std::vector<size_type>::const_iterator it = cm.begin();
+             it != cm.end(); ++it)
+          vec(*it) = 0.;
+      }
+
+      template <class VectorType>
+      void set_zero_all(const std::vector<size_type> &cm,
+                        VectorType                   &vec)
+      {
+        set_zero_in_parallel<VectorType>(cm, vec, std::integral_constant<bool, IsBlockVector<VectorType>::value>());
+        vec.compress(VectorOperation::insert);
+      }
+
+
+      template <class T>
+      void set_zero_all(const std::vector<size_type> &cm,
+                        dealii::Vector<T>            &vec)
+      {
+        set_zero_serial(cm, vec);
+      }
+
+      template <class T>
+      void set_zero_all(const std::vector<size_type> &cm,
+                        dealii::BlockVector<T>       &vec)
+      {
+        set_zero_serial(cm, vec);
+      }
+    }
+  }
+}
+
+
+template <class VectorType>
+void
+AffineConstraints::set_zero (VectorType &vec) const
+{
+  // since lines is a private member, we cannot pass it to the functions
+  // above. therefore, copy its content, which is cheap
+  std::vector<size_type> constrained_lines(lines.size());
+  for (unsigned int i=0; i<lines.size(); ++i)
+    constrained_lines[i] = lines[i].index;
+  internal::AffineConstraintsImplementation::set_zero_all(constrained_lines, vec);
+}
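+
+// For example, after assembling a right hand side one might reset all
+// constrained entries to zero (a sketch; system_rhs is an assumed vector):
+//
+//   constraints.set_zero (system_rhs);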
+
+
+
+template <typename VectorType, typename LocalType>
+void
+AffineConstraints::
+distribute_local_to_global (const Vector<LocalType>      &local_vector,
+                            const std::vector<size_type> &local_dof_indices,
+                            VectorType                   &global_vector,
+                            const FullMatrix<LocalType>  &local_matrix) const
+{
+  distribute_local_to_global(local_vector,local_dof_indices,local_dof_indices, global_vector, local_matrix, true);
+}
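+
+// In user code, the variant above is typically called during assembly of an
+// inhomogeneously constrained system (a sketch; cell_rhs, cell_matrix, and
+// local_dof_indices are assumptions):
+//
+//   constraints.distribute_local_to_global (cell_rhs,
+//                                           local_dof_indices,
+//                                           system_rhs,
+//                                           cell_matrix);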
+
+
+
+template <typename VectorType, typename LocalType>
+void
+AffineConstraints::
+distribute_local_to_global (const Vector<LocalType>      &local_vector,
+                            const std::vector<size_type> &local_dof_indices_row,
+                            const std::vector<size_type> &local_dof_indices_col,
+                            VectorType                   &global_vector,
+                            const FullMatrix<LocalType>  &local_matrix,
+                            bool diagonal) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+  AssertDimension (local_vector.size(), local_dof_indices_row.size());
+  AssertDimension (local_matrix.m(), local_dof_indices_row.size());
+  AssertDimension (local_matrix.n(), local_dof_indices_col.size());
+
+  // The flag 'diagonal' indicates whether the row and column index sets
+  // are equal (in which case it should be set to true). If true, we do
+  // both: assembly of the right hand side (next lines) and (see further
+  // below) modifications of the right hand side according to the
+  // inhomogeneous constraints. Otherwise we only modify the right hand
+  // side according to local_matrix and the inhomogeneous constraints, and
+  // omit the vector add.
+
+  const size_type m_local_dofs = local_dof_indices_row.size();
+  const size_type n_local_dofs = local_dof_indices_col.size();
+  if (lines.empty())
+    {
+      if (diagonal)
+        global_vector.add(local_dof_indices_row, local_vector);
+    }
+  else
+    for (size_type i=0; i<n_local_dofs; ++i)
+      {
+        // check whether the current index is
+        // constrained. if not, just write the entry
+        // into the vector. otherwise, need to resolve
+        // the constraint
+        if (is_constrained(local_dof_indices_col[i]) == false)
+          {
+            if (diagonal)
+              global_vector(local_dof_indices_row[i]) += local_vector(i);
+            continue;
+          }
+
+        // find the constraint line for the given global dof index; the
+        // line is guaranteed to exist because is_constrained() returned
+        // true above
+        const size_type line_index = calculate_line_index (local_dof_indices_col[i]);
+        const ConstraintLine *position =
+          lines_cache.size() <= line_index ? nullptr : &lines[lines_cache[line_index]];
+
+        // Gauss elimination of the matrix columns with the inhomogeneity.
+        // Go through them one by one and again check whether they are
+        // constrained. If so, distribute the constraint
+        const double val = position->inhomogeneity;
+        if (val != 0)
+          for (size_type j=0; j<m_local_dofs; ++j)
+            {
+              if (is_constrained(local_dof_indices_row[j]) == false)
+                {
+                  global_vector(local_dof_indices_row[j]) -= val * local_matrix(j,i);
+                  continue;
+                }
+
+              const LocalType matrix_entry = local_matrix(j,i);
+
+              if (matrix_entry == LocalType())
+                continue;
+
+              const ConstraintLine &position_j =
+                lines[lines_cache[calculate_line_index(local_dof_indices_row[j])]];
+
+              for (size_type q=0; q<position_j.entries.size(); ++q)
+                {
+                  Assert (!(!local_lines.size()
+                            || local_lines.is_element(position_j.entries[q].first))
+                          || is_constrained(position_j.entries[q].first) == false,
+                          ExcMessage ("Tried to distribute to a fixed dof."));
+                  global_vector(position_j.entries[q].first)
+                  -= val * position_j.entries[q].second * matrix_entry;
+                }
+            }
+
+        // now distribute the constraint,
+        // but make sure we don't touch
+        // the entries of fixed dofs
+        if (diagonal)
+          {
+            for (size_type j=0; j<position->entries.size(); ++j)
+              {
+                Assert (!(!local_lines.size()
+                          || local_lines.is_element(position->entries[j].first))
+                        || is_constrained(position->entries[j].first) == false,
+                        ExcMessage ("Tried to distribute to a fixed dof."));
+                global_vector(position->entries[j].first)
+                += local_vector(i) * position->entries[j].second;
+              }
+          }
+      }
+}
+
+
+
+namespace internal
+{
+  namespace
+  {
+    // create an output vector that consists of the input vector's locally owned
+    // elements plus some ghost elements that need to be imported from elsewhere
+    //
+    // this is an operation that is different for all vector types and so we
+    // need a few overloads
+#ifdef DEAL_II_WITH_TRILINOS
+    void
+    import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec,
+                                       const IndexSet                      &/*locally_owned_elements*/,
+                                       const IndexSet                      &needed_elements,
+                                       TrilinosWrappers::MPI::Vector       &output,
+                                       const std::integral_constant<bool, false>     /*is_block_vector*/)
+    {
+      Assert(!vec.has_ghost_elements(),
+             ExcGhostsPresent());
+#ifdef DEAL_II_WITH_MPI
+      const Epetra_MpiComm *mpi_comm
+        = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
+
+      Assert (mpi_comm != nullptr, ExcInternalError());
+      output.reinit (needed_elements, mpi_comm->GetMpiComm());
+#else
+      output.reinit (needed_elements, MPI_COMM_SELF);
+#endif
+      output = vec;
+    }
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+    void
+    import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec,
+                                       const IndexSet                   &locally_owned_elements,
+                                       const IndexSet                   &needed_elements,
+                                       PETScWrappers::MPI::Vector       &output,
+                                       const std::integral_constant<bool, false>  /*is_block_vector*/)
+    {
+      output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
+      output = vec;
+    }
+#endif
+
+    template <typename number>
+    void
+    import_vector_with_ghost_elements (const LinearAlgebra::distributed::Vector<number> &vec,
+                                       const IndexSet                              &locally_owned_elements,
+                                       const IndexSet                              &needed_elements,
+                                       LinearAlgebra::distributed::Vector<number>       &output,
+                                       const std::integral_constant<bool, false>             /*is_block_vector*/)
+    {
+      // TODO: the input vector might already have all needed elements. we
+      // need to find a way to efficiently avoid the copy in that case
+      const_cast<LinearAlgebra::distributed::Vector<number>&>(vec).zero_out_ghosts();
+      output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
+      output = vec;
+      output.update_ghost_values();
+    }
+
+
+    // all other non-block vector types are sequential and this function
+    // should not be called for them at all -- so throw an exception
+    template <typename Vector>
+    void
+    import_vector_with_ghost_elements (const Vector                     &/*vec*/,
+                                       const IndexSet                   &/*locally_owned_elements*/,
+                                       const IndexSet                   &/*needed_elements*/,
+                                       Vector                           &/*output*/,
+                                       const std::integral_constant<bool, false>  /*is_block_vector*/)
+    {
+      Assert (false, ExcMessage ("We shouldn't even get here!"));
+    }
+
+
+    // for block vectors, simply dispatch to the individual blocks
+    template <class VectorType>
+    void
+    import_vector_with_ghost_elements (const VectorType                &vec,
+                                       const IndexSet                  &locally_owned_elements,
+                                       const IndexSet                  &needed_elements,
+                                       VectorType                      &output,
+                                       const std::integral_constant<bool, true>  /*is_block_vector*/)
+    {
+      output.reinit (vec.n_blocks());
+
+      types::global_dof_index block_start = 0;
+      for (unsigned int b=0; b<vec.n_blocks(); ++b)
+        {
+          import_vector_with_ghost_elements (vec.block(b),
+                                             locally_owned_elements.get_view (block_start, block_start+vec.block(b).size()),
+                                             needed_elements.get_view (block_start, block_start+vec.block(b).size()),
+                                             output.block(b),
+                                             std::integral_constant<bool, false>());
+          block_start += vec.block(b).size();
+        }
+
+      output.collect_sizes ();
+    }
+  }
+}
+
+
+template <class VectorType>
+void
+AffineConstraints::distribute (VectorType &vec) const
+{
+  Assert (sorted==true, ExcMatrixNotClosed());
+
+  // if the vector type supports parallel storage and if the vector actually
+  // does store only part of the vector, distributing is slightly more
+  // complicated. we might be able to skip the complicated part if one
+  // processor owns everything and pretend that this is a sequential vector,
+  // but it is difficult for the other processors to know whether they should
+  // not do anything or if other processors will create a temporary vector,
+  // exchange data (requiring communication, maybe even with the processors
+  // that do not own anything because of that particular parallel model), and
+  // call compress() finally. the first case here is for the complicated case,
+  // the last else is for the simple case (sequential vector)
+  const IndexSet vec_owned_elements = vec.locally_owned_elements();
+
+  if ( dealii::is_serial_vector< VectorType >::value == false )
+    {
+      // This processor owns only part of the vector. One may think that
+      // every processor should be able to simply communicate those elements
+      // it owns and for which it knows that they act as sources to constrained
+      // DoFs to the owners of these DoFs. This would lead to a scheme where all
+      // we need to do is to add some local elements to (possibly non-local) ones
+      // and then call compress().
+      //
+      // Alas, this scheme does not work, as evidenced by the disaster of bug #51,
+      // see http://code.google.com/p/dealii/issues/detail?id=51 and the
+      // reversion of one attempt that implemented this in r29662. Rather, we
+      // need to get a vector that has all the *sources* of the constraints we
+      // own locally, possibly as ghost vector elements, then read from them,
+      // and finally throw away the ghosted vector. This is implemented in the
+      // following.
+      IndexSet needed_elements = vec_owned_elements;
+
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      for (constraint_iterator it = lines.begin();
+           it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->index))
+          for (unsigned int i=0; i<it->entries.size(); ++i)
+            if (!vec_owned_elements.is_element(it->entries[i].first))
+              needed_elements.add_index(it->entries[i].first);
+
+      VectorType ghosted_vector;
+      internal::import_vector_with_ghost_elements (vec,
+                                                   vec_owned_elements, needed_elements,
+                                                   ghosted_vector,
+                                                   std::integral_constant<bool, IsBlockVector<VectorType>::value>());
+
+      for (constraint_iterator it = lines.begin();
+           it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->index))
+          {
+            typename VectorType::value_type
+            new_value = it->inhomogeneity;
+            for (unsigned int i=0; i<it->entries.size(); ++i)
+              new_value += (static_cast<typename VectorType::value_type>
+                            (internal::ElementAccess<VectorType>::get(
+                               ghosted_vector, it->entries[i].first)) *
+                            it->entries[i].second);
+            AssertIsFinite(new_value);
+            internal::ElementAccess<VectorType>::set(new_value, it->index, vec);
+          }
+
+      // now compress to communicate the entries that we wrote into
+      // elements that are not locally owned to their owning processors
+      //
+      // this shouldn't be strictly necessary but it probably doesn't
+      // hurt either
+      vec.compress (VectorOperation::insert);
+    }
+  else
+    // purely sequential vector (either because the type doesn't
+    // support anything else or because it's completely stored
+    // locally)
+    {
+      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+      for (; next_constraint != lines.end(); ++next_constraint)
+        {
+          // fill entry in line
+          // next_constraint.index by adding the
+          // different contributions
+          typename VectorType::value_type
+          new_value = next_constraint->inhomogeneity;
+          for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+            new_value += (static_cast<typename VectorType::value_type>
+                          (internal::ElementAccess<VectorType>::get(
+                             vec, next_constraint->entries[i].first))*
+                          next_constraint->entries[i].second);
+          AssertIsFinite(new_value);
+          internal::ElementAccess<VectorType>::set(new_value, next_constraint->index,
+                                                   vec);
+        }
+    }
+}
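+
+// A minimal usage sketch of distribute() (illustrative only; the names
+// `constraints` and `solution` are placeholders, not part of this file):
+//
+//   AffineConstraints constraints;
+//   // ... describe constraints via add_line()/add_entry(), then:
+//   constraints.close();
+//   // ... assemble and solve the linear system into `solution` ...
+//   constraints.distribute(solution); // fill constrained entries from
+//                                     // the dofs that constrain them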
+
+
+
+// Some helper definitions for the local_to_global functions.
+namespace internals
+{
+  typedef types::global_dof_index size_type;
+
+  // this struct contains all the information we need to store about each of
+  // the global entries (global_row): are they obtained directly by some local
+  // entry (local_row) or some constraints (constraint_position). This is not
+  // directly used in the user code, but accessed via the GlobalRowsFromLocal.
+  //
+  // The actions performed here correspond to reshaping the constraint
+  // information from global degrees of freedom to local ones (i.e.,
+  // cell-related DoFs), and also transforming the constraint information from
+  // compressed row storage (each local dof that is constrained has a list of
+  // constraint entries associated to it) into compressed column storage based
+  // on the cell-related DoFs (we have a list of global degrees of freedom,
+  // and to each we have a list of local rows where the entries come from). To
+  // increase the speed, we additionally store whether an entry is generated
+  // directly from the local degrees of freedom or whether it comes from a
+  // constraint.
+  struct Distributing
+  {
+    Distributing (const size_type global_row = numbers::invalid_size_type,
+                  const size_type local_row = numbers::invalid_size_type);
+    Distributing (const Distributing &in);
+    Distributing &operator = (const Distributing &in);
+    bool operator < (const Distributing &in) const
+    {
+      return global_row<in.global_row;
+    }
+
+    size_type global_row;
+    size_type local_row;
+    mutable size_type constraint_position;
+  };
+
+  inline
+  Distributing::Distributing (const size_type global_row,
+                              const size_type local_row) :
+    global_row (global_row),
+    local_row (local_row),
+    constraint_position (numbers::invalid_size_type) {}
+
+  inline
+  Distributing::Distributing (const Distributing &in)
+    :
+    constraint_position (numbers::invalid_size_type)
+  {
+    *this = (in);
+  }
+
+  inline
+  Distributing &Distributing::operator = (const Distributing &in)
+  {
+    global_row = in.global_row;
+    local_row = in.local_row;
+    // the constraints pointer should not contain any data here.
+    Assert (constraint_position == numbers::invalid_size_type,
+            ExcInternalError());
+
+    if (in.constraint_position != numbers::invalid_size_type)
+      {
+        constraint_position = in.constraint_position;
+        in.constraint_position = numbers::invalid_size_type;
+      }
+    return *this;
+  }
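+
+  // Note that assignment transfers constraint_position from the source to
+  // the destination (similar in spirit to a move), which is why the member
+  // is declared mutable. For example (a sketch):
+  //
+  //   Distributing a (/*global_row=*/5, /*local_row=*/2);
+  //   a.constraint_position = 3;
+  //   Distributing b;
+  //   b = a; // now b.constraint_position == 3 and
+  //          // a.constraint_position == numbers::invalid_size_type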
+
+
+
+  // this is a cache for constraints that are encountered on a local level.
+  // The functionality is similar to
+  // std::vector<std::vector<std::pair<uint,double> > >, but tuned so that
+  // frequent memory allocation for each entry is avoided. The data is put
+  // into a std::vector<std::pair<uint,double> > and the row length is kept
+  // fixed at row_length. Both the number of rows and the row length can
+  // change as this structure is filled. In that case, the data is
+  // rearranged. This is not directly used in the user code, but accessed via
+  // the GlobalRowsFromLocal.
+  struct DataCache
+  {
+    DataCache ()
+      :
+      row_length (8)
+    {}
+
+    void reinit ()
+    {
+      individual_size.resize(0);
+      data.resize(0);
+    }
+
+    size_type insert_new_index (const std::pair<size_type,double> &pair)
+    {
+      Assert(row_length > 0, ExcInternalError());
+      const unsigned int index = individual_size.size();
+      individual_size.push_back(1);
+      data.resize(individual_size.size()*row_length);
+      data[index*row_length] = pair;
+      individual_size[index] = 1;
+      return index;
+    }
+
+    void append_index (const size_type index,
+                       const std::pair<size_type,double> &pair)
+    {
+      AssertIndexRange (index, individual_size.size());
+      const size_type my_length = individual_size[index];
+      if (my_length == row_length)
+        {
+          AssertDimension(data.size(), individual_size.size()*row_length);
+          // no space left in this row, need to double row_length and
+          // rearrange the data items. Move all items to the right except the
+          // first one, starting at the back. Since individual_size contains
+          // at least one element when we get here, subtracting 1 works fine.
+          data.resize(2*data.size());
+          for (size_type i=individual_size.size()-1; i>0; --i)
+            {
+              const auto ptr = data.data();
+              std::move_backward(ptr + i*row_length,
+                                 ptr + i*row_length + individual_size[i],
+                                 ptr + i*2*row_length + individual_size[i]);
+            }
+          row_length *= 2;
+        }
+      data[index*row_length+my_length] = pair;
+      individual_size[index] = my_length + 1;
+    }
+
+    size_type
+    get_size (const size_type index) const
+    {
+      return individual_size[index];
+    }
+
+    const std::pair<size_type,double> *
+    get_entry (const size_type index) const
+    {
+      return &data[index*row_length];
+    }
+
+    size_type row_length;
+
+    std::vector<std::pair<size_type,double> > data;
+
+    std::vector<size_type> individual_size;
+  };
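+
+  // A minimal usage sketch of the cache (illustrative only):
+  //
+  //   DataCache cache;
+  //   const size_type r = cache.insert_new_index (std::make_pair(12, 0.5));
+  //   cache.append_index (r, std::make_pair(14, 0.25));
+  //   // now cache.get_size(r) == 2 and cache.get_entry(r)[1].first == 14
+  //
+  // appending a ninth entry to any row would double row_length from its
+  // initial value of 8 and shift all rows to their new offsets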
+
+
+
+  // collects all the global rows from a local contribution (cell) and their
+  // origin (direct/constraint). this is basically a vector consisting of
+  // "Distributing" structs using access via the DataCache. Provides some
+  // specialized sort and insert functions.
+  //
+  // in case there are no constraints, this is basically a list of pairs
+  // <uint,uint> with the first index being the global index and the second
+  // index the local index. The list is sorted with respect to the global
+  // index.
+  //
+  // in case there are constraints, a global dof might get a contribution also
+  // because it gets data from a constrained dof. This means that a global dof
+  // might also have indirect contributions from a local dof via a constraint,
+  // besides the direct ones.
+  //
+  // The actions performed here correspond to reshaping the constraint
+  // information from global degrees of freedom to local ones (i.e.,
+  // cell-related DoFs), and also transforming the constraint information from
+  // compressed row storage (each local dof that is constrained has a list of
+  // constraint entries associated to it) into compressed column storage based
+  // on the cell-related DoFs (we have a list of global degrees of freedom,
+  // and to each we have a list of local rows where the entries come from). To
+  // increase the speed, we additionally store whether an entry is generated
+  // directly from the local degrees of freedom or whether it comes from a
+  // constraint.
+  class GlobalRowsFromLocal
+  {
+  public:
+    GlobalRowsFromLocal ()
+      :
+      n_active_rows (0),
+      n_inhomogeneous_rows (0)
+    {}
+
+    void reinit (const size_type n_local_rows)
+    {
+      total_row_indices.resize(n_local_rows);
+      for (unsigned int i=0; i<n_local_rows; ++i)
+        total_row_indices[i].constraint_position = numbers::invalid_size_type;
+      n_active_rows = n_local_rows;
+      n_inhomogeneous_rows = 0;
+      data_cache.reinit();
+    }
+
+    // implemented below
+    void insert_index (const size_type global_row,
+                       const size_type local_row,
+                       const double       constraint_value);
+    void sort ();
+
+    // Print object for debugging purposes
+    void print(std::ostream &os)
+    {
+      os << "Active rows " << n_active_rows << std::endl
+         << "Constr rows " << n_constraints() << std::endl
+         << "Inhom  rows " << n_inhomogeneous_rows << std::endl
+         << "Local: ";
+      for (size_type i=0 ; i<total_row_indices.size() ; ++i)
+        os << ' ' << std::setw(4) << total_row_indices[i].local_row;
+      os << std::endl
+         << "Global:";
+      for (size_type i=0 ; i<total_row_indices.size() ; ++i)
+        os << ' ' << std::setw(4) << total_row_indices[i].global_row;
+      os << std::endl
+         << "ConPos:";
+      for (size_type i=0 ; i<total_row_indices.size() ; ++i)
+        os << ' ' << std::setw(4) << total_row_indices[i].constraint_position;
+      os << std::endl;
+    }
+
+
+    // return all kinds of information on the constraints
+
+    // returns the number of global indices in the struct
+    size_type size () const
+    {
+      return n_active_rows;
+    }
+
+    // returns the number of constraints that are associated to the
+    // counter_index-th entry in the list
+    size_type size (const size_type counter_index) const
+    {
+      return (total_row_indices[counter_index].constraint_position ==
+              numbers::invalid_size_type ?
+              0 :
+              data_cache.get_size(total_row_indices[counter_index].
+                                  constraint_position));
+    }
+
+    // returns the global row of the counter_index-th entry in the list
+    size_type global_row (const size_type counter_index) const
+    {
+      return total_row_indices[counter_index].global_row;
+    }
+
+    // returns the global row of the counter_index-th entry in the list
+    size_type &global_row (const size_type counter_index)
+    {
+      return total_row_indices[counter_index].global_row;
+    }
+
+    // returns the local row in the cell matrix associated with the
+    // counter_index-th entry in the list. Returns invalid_size_type for
+    // constrained rows
+    size_type local_row (const size_type counter_index) const
+    {
+      return total_row_indices[counter_index].local_row;
+    }
+
+    // writable index
+    size_type &local_row (const size_type counter_index)
+    {
+      return total_row_indices[counter_index].local_row;
+    }
+
+    // returns the local row in the cell matrix associated with the
+    // counter_index-th entry in the list in the index_in_constraint-th
+    // position of constraints
+    size_type local_row (const size_type counter_index,
+                         const size_type index_in_constraint) const
+    {
+      return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
+              [index_in_constraint]).first;
+    }
+
+    // returns the value of the constraint in the counter_index-th entry in
+    // the list in the index_in_constraint-th position of constraints
+    double constraint_value (const size_type counter_index,
+                             const size_type index_in_constraint) const
+    {
+      return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
+              [index_in_constraint]).second;
+    }
+
+    // returns whether there is at least one row with indirect contributions
+    // (i.e., there has been at least one constraint with a non-trivial
+    // ConstraintLine)
+    bool have_indirect_rows () const
+    {
+      return data_cache.individual_size.empty() == false;
+    }
+
+    // append an entry that is constrained. This means that there is one less
+    // nontrivial row
+    void insert_constraint (const size_type constrained_local_dof)
+    {
+      --n_active_rows;
+      total_row_indices[n_active_rows].local_row = constrained_local_dof;
+      total_row_indices[n_active_rows].global_row = numbers::invalid_size_type;
+    }
+
+    // returns the number of constrained dofs in the structure. Constrained
+    // dofs do not contribute directly to the matrix, but are needed in order
+    // to set matrix diagonals and resolve inhomogeneities
+    size_type n_constraints () const
+    {
+      return total_row_indices.size()-n_active_rows;
+    }
+
+    // returns the number of constrained dofs in the structure that have an
+    // inhomogeneity
+    size_type n_inhomogeneities () const
+    {
+      return n_inhomogeneous_rows;
+    }
+
+    // tells the structure that the ith constraint is
+    // inhomogeneous. inhomogeneous constraints contribute to right hand
+    // sides, so to have fast access to them, put them before homogeneous
+    // constraints
+    void set_ith_constraint_inhomogeneous (const size_type i)
+    {
+      Assert (i >= n_inhomogeneous_rows, ExcInternalError());
+      std::swap (total_row_indices[n_active_rows+i],
+                 total_row_indices[n_active_rows+n_inhomogeneous_rows]);
+      n_inhomogeneous_rows++;
+    }
+
+    // the local row where constraint number i was detected, to find that row
+    // easily when the GlobalRowsFromLocal has been set up
+    size_type constraint_origin (size_type i) const
+    {
+      return total_row_indices[n_active_rows+i].local_row;
+    }
+
+    // a vector that contains all the global ids and the corresponding local
+    // ids as well as a pointer to that data where we store how to resolve
+    // constraints.
+    std::vector<Distributing> total_row_indices;
+
+  private:
+    // holds the actual data from the constraints
+    DataCache                 data_cache;
+
+    // how many rows there are, constraints disregarded
+    size_type                 n_active_rows;
+
+    // the number of rows with inhomogeneous constraints
+    size_type                 n_inhomogeneous_rows;
+  };
+
+  // a function that appends an additional row to the list of values, or
+  // appends a value to an already existing row. Similar functionality to
+  // std::map<size_type,Distributing>, but here done for a
+  // std::vector<Distributing>, which is much faster for the short lists we
+  // have here
+  inline
+  void
+  GlobalRowsFromLocal::insert_index (const size_type global_row,
+                                     const size_type local_row,
+                                     const double    constraint_value)
+  {
+    typedef std::vector<Distributing>::iterator index_iterator;
+    index_iterator pos, pos1;
+    Distributing row_value (global_row);
+    std::pair<size_type,double> constraint (local_row, constraint_value);
+
+    // check whether the list was really sorted before entering here
+    for (size_type i=1; i<n_active_rows; ++i)
+      Assert (total_row_indices[i-1] < total_row_indices[i], ExcInternalError());
+
+    pos = Utilities::lower_bound (total_row_indices.begin(),
+                                  total_row_indices.begin()+n_active_rows,
+                                  row_value);
+    if (pos->global_row == global_row)
+      pos1 = pos;
+    else
+      {
+        pos1 = total_row_indices.insert(pos, row_value);
+        ++n_active_rows;
+      }
+
+    if (pos1->constraint_position == numbers::invalid_size_type)
+      pos1->constraint_position = data_cache.insert_new_index (constraint);
+    else
+      data_cache.append_index (pos1->constraint_position, constraint);
+  }
+
+  // this sort algorithm sorts std::vector<Distributing>, but does not take
+  // the constraints into account. this means that if constraints have
+  // already been inserted, this function does not work as expected. We use
+  // shellsort, which is very fast in case the indices are already sorted
+  // (the usual case with DG elements), and not too slow in other cases
+  inline
+  void
+  GlobalRowsFromLocal::sort ()
+  {
+    size_type i, j, j2, temp, templ, istep;
+    size_type step;
+
+    // check whether the constraints are really empty.
+    const size_type length = size();
+
+    // make sure that we are in the range of the vector
+    AssertIndexRange (length, total_row_indices.size()+1);
+    for (size_type i=0; i<length; ++i)
+      Assert (total_row_indices[i].constraint_position ==
+              numbers::invalid_size_type,
+              ExcInternalError());
+
+    step = length/2;
+    while (step > 0)
+      {
+        for (i=step; i < length; i++)
+          {
+            istep = step;
+            j = i;
+            j2 = j-istep;
+            temp = total_row_indices[i].global_row;
+            templ = total_row_indices[i].local_row;
+            if (total_row_indices[j2].global_row > temp)
+              {
+                while ((j >= istep) && (total_row_indices[j2].global_row > temp))
+                  {
+                    total_row_indices[j].global_row = total_row_indices[j2].global_row;
+                    total_row_indices[j].local_row = total_row_indices[j2].local_row;
+                    j = j2;
+                    j2 -= istep;
+                  }
+                total_row_indices[j].global_row = temp;
+                total_row_indices[j].local_row = templ;
+              }
+          }
+        step = step>>1;
+      }
+  }
+
+
+
+  /**
+   * Scratch data that is used during calls to distribute_local_to_global and
+   * add_entries_local_to_global. In order to avoid frequent memory
+   * allocation, we keep the data alive from one call to the next in a static
+   * variable. Since we want to allow for different number types in matrices,
+   * this is a template.
+   *
+   * Since each thread gets its private version of scratch data out of the
+   * ThreadLocalStorage, no conflicting access can occur. For this to be
+   * valid, we need to make sure that no call within
+   * distribute_local_to_global is made that by itself can spawn tasks.
+   * Otherwise, we might end up in a situation where several threads fight for
+   * the data.
+   *
+   * Access to the scratch data is only through the accessor class which
+   * handles the access as well as marking the data as used.
+   */
+  template <typename MatrixScalar, typename VectorScalar = MatrixScalar>
+  class AffineConstraintsData
+  {
+  public:
+    struct ScratchData
+    {
+      /**
+       * Constructor, does nothing.
+       */
+      ScratchData ()
+        :
+        in_use (false)
+      {}
+
+      /**
+       * Copy constructor, does nothing
+       */
+      ScratchData (const ScratchData &)
+        :
+        in_use (false)
+      {}
+
+      /**
+       * Stores whether the data is currently in use.
+       */
+      bool in_use;
+
+      /**
+       * Temporary array for column indices
+       */
+      std::vector<size_type> columns;
+
+      /**
+       * Temporary array for column values
+       */
+      std::vector<MatrixScalar>    values;
+
+      /**
+       * Temporary array for block start indices
+       */
+      std::vector<size_type> block_starts;
+
+      /**
+       * Temporary array for vector indices
+       */
+      std::vector<size_type> vector_indices;
+
+      /**
+       * Temporary array for vector values
+       */
+      std::vector<VectorScalar> vector_values;
+
+      /**
+       * Data array for reordered row indices.
+       */
+      GlobalRowsFromLocal global_rows;
+
+      /**
+       * Data array for reordered column indices.
+       */
+      GlobalRowsFromLocal global_columns;
+    };
+
+    /**
+     * Accessor class to guard access to scratch_data
+     */
+    class ScratchDataAccessor
+    {
+    public:
+      /**
+       * Constructor. Grabs a scratch data object on the current thread and
+       * marks it as used.
+       */
+      ScratchDataAccessor()
+        :
+        my_scratch_data(&AffineConstraintsData::scratch_data.get())
+      {
+        Assert(my_scratch_data->in_use == false,
+               ExcMessage("Access to thread-local scratch data tried, but it is already "
+                          "in use"));
+        my_scratch_data->in_use = true;
+      }
+
+      /**
+       * Destructor. Marks the scratch data as available again.
+       */
+      ~ScratchDataAccessor()
+      {
+        my_scratch_data->in_use = false;
+      }
+
+      /**
+       * Dereferencing operator.
+       */
+      ScratchData &operator* ()
+      {
+        return *my_scratch_data;
+      }
+
+      /**
+       * Dereferencing operator.
+       */
+      ScratchData *operator-> ()
+      {
+        return my_scratch_data;
+      }
+
+    private:
+      ScratchData *my_scratch_data;
+    };
+
+  private:
+    /**
+     * The actual data object that contains a scratch data for each thread.
+     */
+    static Threads::ThreadLocalStorage<ScratchData> scratch_data;
+  };
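+
+  // A minimal usage sketch of the accessor (illustrative only; the scalar
+  // type double is an arbitrary choice):
+  //
+  //   AffineConstraintsData<double>::ScratchDataAccessor scratch;
+  //   scratch->columns.resize(n);   // scratch data is marked in_use for
+  //   scratch->values.resize(n);    // the lifetime of the accessor (RAII)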
+
+
+
+  // function for block matrices: Find out where in the list of local dofs
+  // (sorted according to global ids) the individual blocks start. Transform
+  // the global indices to block-local indices in order to be able to use
+  // functions like vector.block(1)(block_local_id), instead of
+  // vector(global_id). This avoids transforming indices one-by-one later on.
+  template <class BlockType>
+  inline
+  void
+  make_block_starts (const BlockType        &block_object,
+                     GlobalRowsFromLocal    &global_rows,
+                     std::vector<size_type> &block_starts)
+  {
+    AssertDimension (block_starts.size(), block_object.n_block_rows()+1);
+
+    typedef std::vector<Distributing>::iterator row_iterator;
+    row_iterator block_indices = global_rows.total_row_indices.begin();
+
+    const size_type num_blocks = block_object.n_block_rows();
+    const size_type n_active_rows = global_rows.size();
+
+    // find end of rows.
+    block_starts[0] = 0;
+    for (size_type i=1; i<num_blocks; ++i)
+      {
+        row_iterator first_block =
+          Utilities::lower_bound (block_indices,
+                                  global_rows.total_row_indices.begin()+n_active_rows,
+                                  Distributing(block_object.get_row_indices().block_start(i)));
+        block_starts[i] = first_block - global_rows.total_row_indices.begin();
+        block_indices = first_block;
+      }
+    block_starts[num_blocks] = n_active_rows;
+
+    // transform row indices to block-local index space
+    for (size_type i=block_starts[1]; i<n_active_rows; ++i)
+      global_rows.global_row(i) = block_object.get_row_indices().
+                                  global_to_local(global_rows.global_row(i)).second;
+  }
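+
+  // For example (a sketch): for a 2x2 block matrix whose first block row
+  // covers global rows [0,10) and whose second covers [10,16), sorted
+  // global rows {2, 4, 11, 15} yield block_starts = {0, 2, 4}, and the
+  // last two rows are renumbered to the block-local indices {1, 5}.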
+
+
+
+  // same as before, but for std::vector<uint> instead of
+  // GlobalRowsFromLocal. Used in functions for sparsity patterns.
+  template <class BlockType>
+  inline
+  void
+  make_block_starts (const BlockType        &block_object,
+                     std::vector<size_type> &row_indices,
+                     std::vector<size_type> &block_starts)
+  {
+    AssertDimension (block_starts.size(), block_object.n_block_rows()+1);
+
+    typedef std::vector<size_type>::iterator row_iterator;
+    row_iterator col_indices = row_indices.begin();
+
+    const size_type num_blocks = block_object.n_block_rows();
+
+    // find end of rows.
+    block_starts[0] = 0;
+    for (size_type i=1; i<num_blocks; ++i)
+      {
+        row_iterator first_block =
+          Utilities::lower_bound (col_indices,
+                                  row_indices.end(),
+                                  block_object.get_row_indices().block_start(i));
+        block_starts[i] = first_block - row_indices.begin();
+        col_indices = first_block;
+      }
+    block_starts[num_blocks] = row_indices.size();
+
+    // transform row indices to block-local index space
+    for (size_type i=block_starts[1]; i<row_indices.size(); ++i)
+      row_indices[i] = block_object.get_row_indices().
+                       global_to_local(row_indices[i]).second;
+  }
+
+
+
+  // resolves the constraints of one column in the innermost loop. goes
+  // through the origin of each global entry and finds out which data we
+  // need to collect.
+  template <typename LocalType>
+  static inline
+  LocalType resolve_matrix_entry (const GlobalRowsFromLocal   &global_rows,
+                                  const GlobalRowsFromLocal   &global_cols,
+                                  const size_type              i,
+                                  const size_type              j,
+                                  const size_type              loc_row,
+                                  const FullMatrix<LocalType> &local_matrix)
+  {
+    const size_type loc_col = global_cols.local_row(j);
+    LocalType col_val;
+
+    // case 1: row has direct contribution in local matrix. decide whether col
+    // has a direct contribution. if not, set the value to zero.
+    if (loc_row != numbers::invalid_size_type)
+      {
+        col_val = ((loc_col != numbers::invalid_size_type) ?
+                   local_matrix(loc_row, loc_col) : 0);
+
+        // account for indirect contributions by constraints in column
+        for (size_type p=0; p<global_cols.size(j); ++p)
+          col_val += (local_matrix(loc_row, global_cols.local_row(j,p)) *
+                      global_cols.constraint_value(j,p));
+      }
+
+    // case 2: row has no direct contribution in local matrix
+    else
+      col_val = 0;
+
+    // account for indirect contributions by constraints in row, going through
+    // the direct and indirect references in the given column.
+    for (size_type q=0; q<global_rows.size(i); ++q)
+      {
+        LocalType add_this = (loc_col != numbers::invalid_size_type)
+                             ? local_matrix(global_rows.local_row(i,q), loc_col) : 0;
+
+        for (size_type p=0; p<global_cols.size(j); ++p)
+          add_this += (local_matrix(global_rows.local_row(i,q),
+                                    global_cols.local_row(j,p))
+                       *
+                       global_cols.constraint_value(j,p));
+        col_val += add_this * global_rows.constraint_value(i,q);
+      }
+    return col_val;
+  }
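+
+  // In formula form (a sketch of the four contributions computed above):
+  // if global row i collects local row r directly and local rows r_q with
+  // weights w_iq, and global column j analogously collects c and c_p with
+  // weights w_jp, then the resolved entry is
+  //
+  //   A(i,j) = A_loc(r,c)
+  //          + sum_p  A_loc(r,c_p)   * w_jp
+  //          + sum_q  A_loc(r_q,c)   * w_iq
+  //          + sum_qp A_loc(r_q,c_p) * w_iq * w_jp
+  //
+  // where terms involving r (or c) are dropped if the row (column) has no
+  // direct local contribution.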
+
+
+
+  // computes all entries that need to be written into global_rows[i]. Lists
+  // the resulting values in val_ptr, and the corresponding column indices in
+  // col_ptr.
+  template <typename number, typename LocalType>
+  inline
+  void
+  resolve_matrix_row (const GlobalRowsFromLocal    &global_rows,
+                      const GlobalRowsFromLocal    &global_cols,
+                      const size_type               i,
+                      const size_type               column_start,
+                      const size_type               column_end,
+                      const FullMatrix<LocalType>  &local_matrix,
+                      size_type                   *&col_ptr,
+                      number                      *&val_ptr)
+  {
+    if (column_end == column_start)
+      return;
+
+    AssertIndexRange (column_end-1, global_cols.size());
+    const size_type loc_row = global_rows.local_row(i);
+
+    // fast function if there are no indirect references to any of the local
+    // rows at all on this set of dofs (saves a lot of checks). the only check
+    // we actually need to perform is whether the matrix element is zero.
+    if (global_rows.have_indirect_rows() == false &&
+        global_cols.have_indirect_rows() == false)
+      {
+        AssertIndexRange(loc_row, local_matrix.m());
+        const LocalType *matrix_ptr = &local_matrix(loc_row, 0);
+
+        for (size_type j=column_start; j<column_end; ++j)
+          {
+            const size_type loc_col = global_cols.local_row(j);
+            AssertIndexRange(loc_col, local_matrix.n());
+            const LocalType col_val = matrix_ptr[loc_col];
+            if (col_val != LocalType ())
+              {
+                *val_ptr++ = static_cast<number> (col_val);
+                *col_ptr++ = global_cols.global_row(j);
+              }
+          }
+      }
+
+    // more difficult part when there are indirect references and when we need
+    // to do some more checks.
+    else
+      {
+        for (size_type j=column_start; j<column_end; ++j)
+          {
+            LocalType col_val = resolve_matrix_entry (global_rows, global_cols, i, j,
+                                                      loc_row, local_matrix);
+
+            // if we got some nontrivial value, append it to the array of
+            // values.
+            if (col_val != LocalType ())
+              {
+                *val_ptr++ = static_cast<number> (col_val);
+                *col_ptr++ = global_cols.global_row(j);
+              }
+          }
+      }
+  }
+
+
+
+  // specialized function that can write into the row of a
+  // SparseMatrix<number>.
+  namespace dealiiSparseMatrix
+  {
+    template <typename SparseMatrixIterator, typename LocalType>
+    static inline
+    void add_value (const LocalType       value,
+                    const size_type       row,
+                    const size_type       column,
+                    SparseMatrixIterator &matrix_values)
+    {
+      (void)row;
+      if (value != LocalType ())
+        {
+          while (matrix_values->column() < column)
+            ++matrix_values;
+          Assert (matrix_values->column() == column,
+                  typename SparseMatrix<typename SparseMatrixIterator::MatrixType::value_type>::ExcInvalidIndex(row, column));
+          matrix_values->value() += value;
+        }
+    }
+  }
+
+
+  // similar to the function before, now with a shortcut for deal.II sparse
+  // matrices. this lets us avoid using extra arrays, and does all the
+  // operations just in place, i.e., in the respective matrix row
+  template <typename number, typename LocalType>
+  inline
+  void
+  resolve_matrix_row (const GlobalRowsFromLocal   &global_rows,
+                      const size_type              i,
+                      const size_type              column_start,
+                      const size_type              column_end,
+                      const FullMatrix<LocalType> &local_matrix,
+                      SparseMatrix<number>        *sparse_matrix)
+  {
+    if (column_end == column_start)
+      return;
+
+    AssertIndexRange (column_end-1, global_rows.size());
+    const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern();
+
+    if (sparsity.n_nonzero_elements() == 0)
+      return;
+
+    const size_type row = global_rows.global_row(i);
+    const size_type loc_row = global_rows.local_row(i);
+
+    typename SparseMatrix<number>::iterator
+    matrix_values = sparse_matrix->begin(row);
+    const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols();
+
+    // distinguish three cases about what can happen for checking whether the
+    // diagonal is the first element of the row. this avoids if statements at
+    // the innermost loop positions
+
+    if (!optimize_diagonal) // case 1: no diagonal optimization in matrix
+      {
+        if (global_rows.have_indirect_rows() == false)
+          {
+            AssertIndexRange (loc_row, local_matrix.m());
+            const LocalType *matrix_ptr = &local_matrix(loc_row, 0);
+
+            for (size_type j=column_start; j<column_end; ++j)
+              {
+                const size_type loc_col = global_rows.local_row(j);
+                const LocalType col_val = matrix_ptr[loc_col];
+                dealiiSparseMatrix::add_value (col_val, row,
+                                               global_rows.global_row(j),
+                                               matrix_values);
+              }
+          }
+        else
+          {
+            for (size_type j=column_start; j<column_end; ++j)
+              {
+                LocalType col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
+                                                          loc_row, local_matrix);
+                dealiiSparseMatrix::add_value (col_val, row,
+                                               global_rows.global_row(j),
+                                               matrix_values);
+              }
+          }
+      }
+    else if (i>=column_start && i<column_end) // case 2: can split loop
+      {
+        ++matrix_values; // jump over diagonal element
+        if (global_rows.have_indirect_rows() == false)
+          {
+            AssertIndexRange (loc_row, local_matrix.m());
+            const LocalType *matrix_ptr = &local_matrix(loc_row, 0);
+
+            sparse_matrix->begin(row)->value() += matrix_ptr[loc_row];
+            for (size_type j=column_start; j<i; ++j)
+              {
+                const size_type loc_col = global_rows.local_row(j);
+                const LocalType col_val = matrix_ptr[loc_col];
+                dealiiSparseMatrix::add_value(col_val, row,
+                                              global_rows.global_row(j),
+                                              matrix_values);
+              }
+            for (size_type j=i+1; j<column_end; ++j)
+              {
+                const size_type loc_col = global_rows.local_row(j);
+                const LocalType col_val = matrix_ptr[loc_col];
+                dealiiSparseMatrix::add_value(col_val, row,
+                                              global_rows.global_row(j),
+                                              matrix_values);
+              }
+          }
+        else
+          {
+            sparse_matrix->begin(row)->value() +=
+              resolve_matrix_entry (global_rows, global_rows, i, i,
+                                    loc_row, local_matrix);
+            for (size_type j=column_start; j<i; ++j)
+              {
+                LocalType col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
+                                                          loc_row, local_matrix);
+                dealiiSparseMatrix::add_value (col_val, row,
+                                               global_rows.global_row(j),
+                                               matrix_values);
+              }
+            for (size_type j=i+1; j<column_end; ++j)
+              {
+                LocalType col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
+                                                          loc_row, local_matrix);
+                dealiiSparseMatrix::add_value (col_val, row,
+                                               global_rows.global_row(j),
+                                               matrix_values);
+              }
+          }
+      }
+    // case 3: can't say - need to check inside the loop
+    else if (global_rows.have_indirect_rows() == false)
+      {
+        ++matrix_values; // jump over diagonal element
+        AssertIndexRange (loc_row, local_matrix.m());
+        const LocalType *matrix_ptr = &local_matrix(loc_row, 0);
+
+        for (size_type j=column_start; j<column_end; ++j)
+          {
+            const size_type loc_col = global_rows.local_row(j);
+            const LocalType col_val = matrix_ptr[loc_col];
+            if (row==global_rows.global_row(j))
+              sparse_matrix->begin(row)->value() += col_val;
+            else
+              dealiiSparseMatrix::add_value(col_val, row,
+                                            global_rows.global_row(j),
+                                            matrix_values);
+          }
+      }
+    else
+      {
+        ++matrix_values; // jump over diagonal element
+        for (size_type j=column_start; j<column_end; ++j)
+          {
+            LocalType col_val = resolve_matrix_entry (global_rows, global_rows, i,
+                                                      j, loc_row, local_matrix);
+            if (row==global_rows.global_row(j))
+              sparse_matrix->begin(row)->value() += col_val;
+            else
+              dealiiSparseMatrix::add_value (col_val, row,
+                                             global_rows.global_row(j),
+                                             matrix_values);
+          }
+      }
+  }
+
+
+
+  // Same function as before for resolving all entries that will be added to
+  // the given global row global_rows[i], now for sparsity patterns
+  inline
+  void
+  resolve_matrix_row (const GlobalRowsFromLocal        &global_rows,
+                      const size_type                   i,
+                      const size_type                   column_start,
+                      const size_type                   column_end,
+                      const Table<2,bool>              &dof_mask,
+                      std::vector<size_type>::iterator &col_ptr)
+  {
+    if (column_end == column_start)
+      return;
+
+    const size_type loc_row = global_rows.local_row(i);
+
+    // fast function if there are no indirect references to any of the local
+    // rows at all on this set of dofs
+    if (global_rows.have_indirect_rows() == false)
+      {
+        Assert(loc_row < dof_mask.n_rows(),
+               ExcInternalError());
+
+        for (size_type j=column_start; j<column_end; ++j)
+          {
+            const size_type loc_col = global_rows.local_row(j);
+            Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
+
+            if (dof_mask(loc_row,loc_col) == true)
+              *col_ptr++ = global_rows.global_row(j);
+          }
+      }
+
+    // slower functions when there are indirect references and when we need to
+    // do some more checks.
+    else
+      {
+        for (size_type j=column_start; j<column_end; ++j)
+          {
+            const size_type loc_col = global_rows.local_row(j);
+            if (loc_row != numbers::invalid_size_type)
+              {
+                Assert (loc_row < dof_mask.n_rows(), ExcInternalError());
+                if (loc_col != numbers::invalid_size_type)
+                  {
+                    Assert (loc_col < dof_mask.n_cols(), ExcInternalError());
+                    if (dof_mask(loc_row,loc_col) == true)
+                      goto add_this_index;
+                  }
+
+                for (size_type p=0; p<global_rows.size(j); ++p)
+                  if (dof_mask(loc_row,global_rows.local_row(j,p)) == true)
+                    goto add_this_index;
+              }
+
+            for (size_type q=0; q<global_rows.size(i); ++q)
+              {
+                if (loc_col != numbers::invalid_size_type)
+                  {
+                    Assert (loc_col < dof_mask.n_cols(), ExcInternalError());
+                    if (dof_mask(global_rows.local_row(i,q),loc_col) == true)
+                      goto add_this_index;
+                  }
+
+                for (size_type p=0; p<global_rows.size(j); ++p)
+                  if (dof_mask(global_rows.local_row(i,q),
+                               global_rows.local_row(j,p)) == true)
+                    goto add_this_index;
+              }
+
+            continue;
+            // if we got some nontrivial value, append it to the array of
+            // values.
+add_this_index:
+            *col_ptr++ = global_rows.global_row(j);
+          }
+      }
+  }
+
+
+
+  // to make sure that the global matrix remains invertible, we need to do
+  // something with the diagonal elements. add the absolute value of the local
+  // matrix, so the resulting entry will always be positive and furthermore be
+  // in the same order of magnitude as the other elements of the matrix
+  //
+  // note that this also captures the special case that a dof is both
+  // constrained and fixed (this can happen for hanging nodes in 3d that also
+  // happen to be on the boundary). in that case, following the program flow
+  // in distribute_local_to_global, it is realized that when distributing the
+  // row and column no elements of the matrix are actually touched if all the
+  // degrees of freedom to which this dof is constrained are also constrained
+  // (the usual case with hanging nodes in 3d). however, in the line below, we
+  // do actually do something with this dof
+  template <typename MatrixType, typename VectorType>
+  inline void
+  set_matrix_diagonals (const internals::GlobalRowsFromLocal              &global_rows,
+                        const std::vector<size_type>                      &local_dof_indices,
+                        const FullMatrix<typename MatrixType::value_type> &local_matrix,
+                        const AffineConstraints                           &constraints,
+                        MatrixType                                        &global_matrix,
+                        VectorType                                        &global_vector,
+                        bool                                               use_inhomogeneities_for_rhs)
+  {
+    if (global_rows.n_constraints() > 0)
+      {
+        typename MatrixType::value_type average_diagonal = typename MatrixType::value_type();
+        for (size_type i=0; i<local_matrix.m(); ++i)
+          average_diagonal += std::abs (local_matrix(i,i));
+        average_diagonal /= static_cast<double>(local_matrix.m());
+
+        for (size_type i=0; i<global_rows.n_constraints(); i++)
+          {
+            const size_type local_row = global_rows.constraint_origin(i);
+            const size_type global_row = local_dof_indices[local_row];
+            const typename MatrixType::value_type new_diagonal
+              = (std::abs(local_matrix(local_row,local_row)) != 0 ?
+                 std::abs(local_matrix(local_row,local_row)) : average_diagonal);
+            global_matrix.add(global_row, global_row, new_diagonal);
+
+            // if the use_inhomogeneities_for_rhs flag is set to true, the
+            // inhomogeneities are used to create the global vector. instead
+            // of filling in a zero in the ith component of a dof with an
+            // inhomogeneity, we set it to inhomogeneity(i)*global_matrix(i,i).
+            if (use_inhomogeneities_for_rhs == true)
+              global_vector(global_row) += new_diagonal * constraints.get_inhomogeneity(global_row);
+          }
+      }
+  }
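+
+  // For example (a sketch): if a constrained dof has the local diagonal
+  // entry |local_matrix(r,r)| = 2.5, the global diagonal receives 2.5; if
+  // that local diagonal is zero, the average of all |local_matrix(i,i)| is
+  // added instead, so the global matrix keeps a positive diagonal entry of
+  // the right order of magnitude and remains invertible.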
+
+
+
+  // similar function as the one above for setting matrix diagonals, but now
+  // doing that for sparsity patterns when setting them up using
+  // add_entries_local_to_global. In case we keep constrained entries, add all
+  // the rows and columns related to the constrained dof, otherwise just add
+  // the diagonal
+  template <typename SparsityPatternType>
+  inline void
+  set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows,
+                          const std::vector<size_type>         &local_dof_indices,
+                          const Table<2,bool>                  &dof_mask,
+                          const bool                            keep_constrained_entries,
+                          SparsityPatternType                  &sparsity_pattern)
+  {
+    // if we got constraints, need to add the diagonal element and, if the
+    // user requested so, also the rest of the entries in rows and columns
+    // that have been left out above
+    if (global_rows.n_constraints() > 0)
+      {
+        for (size_type i=0; i<global_rows.n_constraints(); i++)
+          {
+            const size_type local_row = global_rows.constraint_origin(i);
+            const size_type global_row = local_dof_indices[local_row];
+            if (keep_constrained_entries == true)
+              {
+                for (size_type j=0; j<local_dof_indices.size(); ++j)
+                  {
+                    if (dof_mask(local_row,j) == true)
+                      sparsity_pattern.add(global_row,
+                                           local_dof_indices[j]);
+                    if (dof_mask(j,local_row) == true)
+                      sparsity_pattern.add(local_dof_indices[j],
+                                           global_row);
+                  }
+              }
+            else
+              // don't keep constrained entries - just add the diagonal.
+              sparsity_pattern.add(global_row,global_row);
+          }
+      }
+  }
+
+} // end of namespace internals
+
+
+
+// Basic idea of setting up a list of all global dofs: first find all rows
+// and columns that we are going to touch, and then go through the
+// constraint lines and collect all the local rows that are related to them.
+void
+AffineConstraints::
+make_sorted_row_list (const std::vector<size_type>   &local_dof_indices,
+                      internals::GlobalRowsFromLocal &global_rows) const
+{
+  const size_type n_local_dofs = local_dof_indices.size();
+  AssertDimension (n_local_dofs, global_rows.size());
+
+  // when distributing the local data to the global matrix, we can quite
+  // cheaply sort the indices (obviously, this introduces the need for
+  // allocating some memory on the way, but we need to do this only for rows,
+  // whereas the distribution process itself goes over rows and columns). This
+  // has the advantage that when writing into the global matrix, we can make
+  // use of the sortedness.
+
+  // so the first step is to create a sorted list of all row values that are
+  // possible. these values are either the rows from unconstrained dofs, or
+  // some indices introduced by dofs constrained to a combination of some
+  // other dofs. regarding the data type, choose a <tt>std::vector</tt> of a
+  // pair of unsigned ints (for global columns) and internal data (containing
+  // local columns + possible jumps from constraints). Choosing
+  // <tt>std::map</tt> or anything else M.K. knows of would be much more
+  // expensive here!
+
+  // cache whether we have to resolve any indirect rows generated from
+  // resolving constrained dofs.
+  size_type added_rows = 0;
+
+  // first add the indices in an unsorted way and only keep track of the
+  // constraints that appear. They are resolved in a second step.
+  for (size_type i = 0; i<n_local_dofs; ++i)
+    {
+      if (is_constrained(local_dof_indices[i]) == false)
+        {
+          global_rows.global_row(added_rows)  = local_dof_indices[i];
+          global_rows.local_row(added_rows++) = i;
+        }
+      else
+        global_rows.insert_constraint(i);
+    }
+  global_rows.sort();
+
+  const size_type n_constrained_rows = n_local_dofs-added_rows;
+  for (size_type i=0; i<n_constrained_rows; ++i)
+    {
+      const size_type local_row = global_rows.constraint_origin(i);
+      AssertIndexRange(local_row, n_local_dofs);
+      const size_type global_row = local_dof_indices[local_row];
+      Assert (is_constrained(global_row), ExcInternalError());
+      const ConstraintLine &position =
+        lines[lines_cache[calculate_line_index(global_row)]];
+      if (position.inhomogeneity != 0)
+        global_rows.set_ith_constraint_inhomogeneous (i);
+      for (size_type q=0; q<position.entries.size(); ++q)
+        global_rows.insert_index (position.entries[q].first,
+                                  local_row,
+                                  position.entries[q].second);
+    }
+}
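+
+// For example (a sketch): with local_dof_indices = {7, 3, 5} and dof 5
+// constrained as u_5 = 0.5 u_3 + 0.5 u_7, the first loop records the
+// unconstrained global rows {7, 3} and the constrained local row 2;
+// sort() orders the active rows to {3, 7}; the second loop then inserts
+// the weight 0.5 for local row 2 into the entries for global rows 3 and 7.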
+
+
+
+// Same function as before, but now only extract the global indices that
+// come from the local ones, without storing their origin. Used for sparsity
+// pattern generation.
+inline
+void
+AffineConstraints::
+make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
+                      std::vector<size_type>       &active_dofs) const
+{
+  const size_type n_local_dofs = local_dof_indices.size();
+  size_type added_rows = 0;
+  for (size_type i = 0; i<n_local_dofs; ++i)
+    {
+      if (is_constrained(local_dof_indices[i]) == false)
+        {
+          active_dofs[added_rows++] = local_dof_indices[i];
+          continue;
+        }
+
+      active_dofs[n_local_dofs-i+added_rows-1] = i;
+    }
+  std::sort (active_dofs.begin(), active_dofs.begin()+added_rows);
+
+  const size_type n_constrained_dofs = n_local_dofs-added_rows;
+  for (size_type i=n_constrained_dofs; i>0; --i)
+    {
+      const size_type local_row = active_dofs.back();
+
+      // remove constrained entry since we are going to resolve it in place
+      active_dofs.pop_back();
+      const size_type global_row = local_dof_indices[local_row];
+      const ConstraintLine &position =
+        lines[lines_cache[calculate_line_index(global_row)]];
+      for (size_type q=0; q<position.entries.size(); ++q)
+        {
+          const size_type new_index = position.entries[q].first;
+          if (active_dofs[active_dofs.size()-i] < new_index)
+            active_dofs.insert(active_dofs.end()-i+1,new_index);
+
+          else
+            {
+              // do a binary search to find where to put the new index in
+              // order to keep the list sorted
+              std::vector<size_type>::iterator it =
+                Utilities::lower_bound(active_dofs.begin(),
+                                       active_dofs.end()-i+1,
+                                       new_index);
+              if (*it != new_index)
+                active_dofs.insert(it, new_index);
+            }
+        }
+    }
+}
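+
+// As a (hypothetical) example of the in-place resolution above: for local
+// dofs {3, 7, 12} with x_7 = 0.5 x_1 + 0.5 x_2, the intermediate array is
+// {3, 12, 1} -- the unconstrained rows sorted to the front, the local row
+// of the constrained dof stored at the end. The constrained entry is then
+// popped and replaced by its constituent indices, giving the final sorted
+// list {1, 2, 3, 12}.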
+
+
+
+// Resolve the constraints from the vector and apply inhomogeneities.
+template <typename MatrixScalar, typename VectorScalar>
+inline
+typename ProductType<VectorScalar,MatrixScalar>::type
+AffineConstraints::
+resolve_vector_entry (const size_type                       i,
+                      const internals::GlobalRowsFromLocal &global_rows,
+                      const Vector<VectorScalar>           &local_vector,
+                      const std::vector<size_type>         &local_dof_indices,
+                      const FullMatrix<MatrixScalar>       &local_matrix) const
+{
+  const size_type loc_row = global_rows.local_row(i);
+  const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities();
+  typename ProductType<VectorScalar,MatrixScalar>::type val = 0;
+  // check whether the row has a direct contribution from some local entry.
+  // If we have inhomogeneous constraints, also compute the contribution of
+  // the inhomogeneity in the current row.
+  if (loc_row != numbers::invalid_size_type)
+    {
+      val = local_vector(loc_row);
+      for (size_type i=0; i<n_inhomogeneous_rows; ++i)
+        val -= (local_matrix(loc_row, global_rows.constraint_origin(i)) *
+                lines[lines_cache[calculate_line_index(local_dof_indices
+                                                       [global_rows.constraint_origin(i)])]].
+                inhomogeneity);
+    }
+
+  // go through the indirect contributions
+  for (size_type q=0; q<global_rows.size(i); ++q)
+    {
+      const size_type loc_row_q = global_rows.local_row(i,q);
+      typename ProductType<VectorScalar,MatrixScalar>::type add_this = local_vector (loc_row_q);
+      for (size_type k=0; k<n_inhomogeneous_rows; ++k)
+        add_this -= (local_matrix(loc_row_q,global_rows.constraint_origin(k)) *
+                     lines[lines_cache[calculate_line_index
+                                       (local_dof_indices
+                                        [global_rows.constraint_origin(k)])]].
+                     inhomogeneity);
+      val += add_this * global_rows.constraint_value(i,q);
+    }
+  return val;
+}
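+
+// Schematically (a summary of the loops above, not an authoritative
+// formula): for a global row with direct local origin r, indirect
+// contributions q with weights w_q, and inhomogeneities g_k on the
+// constrained local columns k, the returned value is
+//   val = f_r - sum_k A(r,k) g_k + sum_q w_q (f_q - sum_k A(q,k) g_k).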
+
+
+// internal implementation for distribute_local_to_global for standard
+// (non-block) matrices
+template <typename MatrixType, typename VectorType>
+void
+AffineConstraints::distribute_local_to_global (
+  const FullMatrix<typename MatrixType::value_type> &local_matrix,
+  const Vector<typename VectorType::value_type>     &local_vector,
+  const std::vector<size_type>                      &local_dof_indices,
+  MatrixType                                        &global_matrix,
+  VectorType                                        &global_vector,
+  bool                                               use_inhomogeneities_for_rhs,
+  std::integral_constant<bool, false>) const
+{
+  // check whether we work on real vectors or whether we were just given
+  // dummies when the other function above was called.
+  const bool use_vectors = (local_vector.size() == 0 &&
+                            global_vector.size() == 0) ? false : true;
+  typedef typename MatrixType::value_type number;
+  const bool use_dealii_matrix =
+    std::is_same<MatrixType,SparseMatrix<number> >::value;
+
+  AssertDimension (local_matrix.n(), local_dof_indices.size());
+  AssertDimension (local_matrix.m(), local_dof_indices.size());
+  Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
+  if (use_vectors == true)
+    {
+      AssertDimension (local_matrix.m(), local_vector.size());
+      AssertDimension (global_matrix.m(), global_vector.size());
+    }
+  Assert (lines.empty() || sorted == true, ExcMatrixNotClosed());
+
+  const size_type n_local_dofs = local_dof_indices.size();
+
+  typename internals::AffineConstraintsData<typename MatrixType::value_type,typename VectorType::value_type>::ScratchDataAccessor
+  scratch_data;
+
+  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
+  global_rows.reinit(n_local_dofs);
+  make_sorted_row_list (local_dof_indices, global_rows);
+
+  const size_type n_actual_dofs = global_rows.size();
+
+  // create arrays for the column data (indices and values) that will then be
+  // written into the matrix. Shortcut for deal.II sparse matrix. We can use
+  // the scratch data if we have a double matrix. Otherwise, we need to create
+  // an array in any case since we cannot know about the actual data type in
+  // the AffineConstraints class (unless we cast). This involves a little
+  // bit of logic to determine the type of the matrix value.
+  std::vector<size_type> &cols           = scratch_data->columns;
+  std::vector<number>    &vals           = scratch_data->values;
+  // create arrays for writing into the vector as well
+  std::vector<size_type> &vector_indices = scratch_data->vector_indices;
+  std::vector<typename VectorType::value_type> &vector_values  = scratch_data->vector_values;
+  vector_indices.resize(n_actual_dofs);
+  vector_values.resize(n_actual_dofs);
+  SparseMatrix<number> *sparse_matrix
+    = dynamic_cast<SparseMatrix<number> *>(&global_matrix);
+  if (use_dealii_matrix == false)
+    {
+      cols.resize (n_actual_dofs);
+      vals.resize (n_actual_dofs);
+    }
+  else
+    Assert (sparse_matrix != nullptr, ExcInternalError());
+
+  // now do the actual job. go through all the global rows that we will touch
+  // and call resolve_matrix_row for each of those.
+  size_type local_row_n = 0;
+  for (size_type i=0; i<n_actual_dofs; ++i)
+    {
+      const size_type row = global_rows.global_row(i);
+
+      // calculate all the data that will be written into the matrix row.
+      if (use_dealii_matrix == false)
+        {
+          size_type *col_ptr = &cols[0];
+          number *val_ptr = &vals[0];
+          internals::resolve_matrix_row (global_rows, global_rows, i, 0,
+                                         n_actual_dofs,
+                                         local_matrix, col_ptr, val_ptr);
+          const size_type n_values = col_ptr - &cols[0];
+          if (n_values > 0)
+            global_matrix.add(row, n_values, &cols[0], &vals[0], false,
+                              true);
+        }
+      else
+        internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
+                                       local_matrix, sparse_matrix);
+
+      // now to the vectors. besides doing the same job as we did above
+      // (i.e., distribute the content of the local vector into the global
+      // one), we need to account for inhomogeneities here: this corresponds
+      // to eliminating the respective column of the local matrix and moving
+      // its contribution to the right hand side.
+      if (use_vectors == true)
+        {
+          const typename VectorType::value_type
+          val = resolve_vector_entry (i, global_rows,
+                                      local_vector,
+                                      local_dof_indices,
+                                      local_matrix);
+          AssertIsFinite(val);
+
+          if (val != typename VectorType::value_type ())
+            {
+              vector_indices[local_row_n] = row;
+              vector_values[local_row_n] = val;
+              ++local_row_n;
+            }
+        }
+    }
+  // Drop the elements of vector_indices and vector_values that we do not use (we may
+  // always elide writing zero values to vectors)
+  const size_type n_local_rows = local_row_n;
+  vector_indices.resize(n_local_rows);
+  vector_values.resize(n_local_rows);
+
+  // While the standard case is that these types are equal, they need not be, so
+  // only do a bulk update if they are. Note that the types in the arguments to
+  // add must be equal if we have a Trilinos or PETSc vector but do not have to
+  // be if we have a deal.II native vector: one could further optimize this
+  // for Vector, LinearAlgebra::distributed::Vector, etc.
+  if (std::is_same<typename VectorType::value_type, number>::value)
+    {
+      global_vector.add(vector_indices,
+                        *reinterpret_cast<std::vector<number> *>(&vector_values));
+    }
+  else
+    {
+      for (size_type row_n=0; row_n<n_local_rows; ++row_n)
+        {
+          global_vector(vector_indices[row_n]) +=
+            static_cast<typename VectorType::value_type>(vector_values[row_n]);
+        }
+    }
+
+  internals::set_matrix_diagonals (global_rows, local_dof_indices,
+                                   local_matrix, *this,
+                                   global_matrix, global_vector, use_inhomogeneities_for_rhs);
+}
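+
+// A sketch of a typical call site inside an assembly loop (the variable
+// names are assumptions, not part of this file):
+//
+//   cell->get_dof_indices (local_dof_indices);
+//   constraints.distribute_local_to_global (cell_matrix, cell_rhs,
+//                                           local_dof_indices,
+//                                           system_matrix, system_rhs);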
+
+
+
+// similar function as above, but now specialized for block matrices. See the
+// other function for additional comments.
+template <typename MatrixType, typename VectorType>
+void
+AffineConstraints::
+distribute_local_to_global (
+  const FullMatrix<typename MatrixType::value_type> &local_matrix,
+  const Vector<typename VectorType::value_type>     &local_vector,
+  const std::vector<size_type>                      &local_dof_indices,
+  MatrixType                                        &global_matrix,
+  VectorType                                        &global_vector,
+  bool                                               use_inhomogeneities_for_rhs,
+  std::integral_constant<bool, true>) const
+{
+  const bool use_vectors = (local_vector.size() == 0 &&
+                            global_vector.size() == 0) ? false : true;
+  typedef typename MatrixType::value_type number;
+  const bool use_dealii_matrix =
+    std::is_same<MatrixType,BlockSparseMatrix<number> >::value;
+
+  AssertDimension (local_matrix.n(), local_dof_indices.size());
+  AssertDimension (local_matrix.m(), local_dof_indices.size());
+  Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
+  Assert (global_matrix.n_block_rows() == global_matrix.n_block_cols(),
+          ExcNotQuadratic());
+  if (use_vectors == true)
+    {
+      AssertDimension (local_matrix.m(), local_vector.size());
+      AssertDimension (global_matrix.m(), global_vector.size());
+    }
+  Assert (sorted == true, ExcMatrixNotClosed());
+
+  typename internals::AffineConstraintsData<typename MatrixType::value_type,typename VectorType::value_type>::ScratchDataAccessor
+  scratch_data;
+
+  const size_type n_local_dofs = local_dof_indices.size();
+  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
+  global_rows.reinit(n_local_dofs);
+
+  make_sorted_row_list (local_dof_indices, global_rows);
+  const size_type n_actual_dofs = global_rows.size();
+
+  std::vector<size_type> &global_indices = scratch_data->vector_indices;
+  if (use_vectors == true)
+    {
+      global_indices.resize(n_actual_dofs);
+      for (size_type i=0; i<n_actual_dofs; ++i)
+        global_indices[i] = global_rows.global_row(i);
+    }
+
+  // additional construct that also takes care of block indices.
+  const size_type num_blocks   = global_matrix.n_block_rows();
+  std::vector<size_type> &block_starts = scratch_data->block_starts;
+  block_starts.resize(num_blocks+1);
+  internals::make_block_starts (global_matrix, global_rows, block_starts);
+
+  std::vector<size_type> &cols = scratch_data->columns;
+  std::vector<number>     &vals = scratch_data->values;
+  if (use_dealii_matrix == false)
+    {
+      cols.resize (n_actual_dofs);
+      vals.resize (n_actual_dofs);
+    }
+
+  // the basic difference to the non-block variant from now onwards is that we
+  // go through the blocks of the matrix separately, which allows us to set
+  // the block entries individually
+  for (size_type block=0; block<num_blocks; ++block)
+    {
+      const size_type next_block = block_starts[block+1];
+      for (size_type i=block_starts[block]; i<next_block; ++i)
+        {
+          const size_type row = global_rows.global_row(i);
+
+          for (size_type block_col=0; block_col<num_blocks; ++block_col)
+            {
+              const size_type start_block = block_starts[block_col],
+                              end_block = block_starts[block_col+1];
+              if (use_dealii_matrix == false)
+                {
+                  size_type *col_ptr = &cols[0];
+                  number *val_ptr = &vals[0];
+                  internals::resolve_matrix_row (global_rows, global_rows, i,
+                                                 start_block, end_block,
+                                                 local_matrix, col_ptr, val_ptr);
+                  const size_type n_values = col_ptr - &cols[0];
+                  if (n_values > 0)
+                    global_matrix.block(block, block_col).add(row, n_values,
+                                                              &cols[0], &vals[0],
+                                                              false, true);
+                }
+              else
+                {
+                  SparseMatrix<number> *sparse_matrix
+                    = dynamic_cast<SparseMatrix<number> *>(&global_matrix.block(block,
+                                                           block_col));
+                  Assert (sparse_matrix != nullptr, ExcInternalError());
+                  internals::resolve_matrix_row (global_rows, i, start_block,
+                                                 end_block, local_matrix, sparse_matrix);
+                }
+            }
+
+          if (use_vectors == true)
+            {
+              const number val = resolve_vector_entry (i, global_rows,
+                                                       local_vector,
+                                                       local_dof_indices,
+                                                       local_matrix);
+
+              if (val != number ())
+                global_vector(global_indices[i]) +=
+                  static_cast<typename VectorType::value_type>(val);
+            }
+        }
+    }
+
+  internals::set_matrix_diagonals (global_rows, local_dof_indices,
+                                   local_matrix, *this,
+                                   global_matrix, global_vector, use_inhomogeneities_for_rhs);
+}
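+
+// The call site looks the same for block objects; the std::integral_constant
+// tag selects between the block and non-block variants. A sketch (names
+// again assumed):
+//
+//   constraints.distribute_local_to_global (cell_matrix, cell_rhs,
+//                                           local_dof_indices,
+//                                           block_matrix, block_rhs);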
+
+
+
+template <typename MatrixType>
+void
+AffineConstraints::distribute_local_to_global (
+  const FullMatrix<typename MatrixType::value_type> &local_matrix,
+  const std::vector<size_type>                      &row_indices,
+  const std::vector<size_type>                      &col_indices,
+  MatrixType                                        &global_matrix) const
+{
+  distribute_local_to_global(local_matrix, row_indices, *this,
+                             col_indices, global_matrix);
+}
+
+
+
+template <typename MatrixType>
+void
+AffineConstraints::distribute_local_to_global (
+  const FullMatrix<typename MatrixType::value_type> &local_matrix,
+  const std::vector<size_type>                      &row_indices,
+  const AffineConstraints                           &col_constraint_matrix,
+  const std::vector<size_type>                      &col_indices,
+  MatrixType                                        &global_matrix) const
+{
+  typedef typename MatrixType::value_type number;
+
+  AssertDimension (local_matrix.m(), row_indices.size());
+  AssertDimension (local_matrix.n(), col_indices.size());
+
+  const size_type n_local_row_dofs = row_indices.size();
+  const size_type n_local_col_dofs = col_indices.size();
+
+  typename internals::AffineConstraintsData<typename MatrixType::value_type>::ScratchDataAccessor
+  scratch_data;
+  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
+  global_rows.reinit(n_local_row_dofs);
+  internals::GlobalRowsFromLocal &global_cols = scratch_data->global_columns;
+  global_cols.reinit(n_local_col_dofs);
+  make_sorted_row_list (row_indices, global_rows);
+  col_constraint_matrix.make_sorted_row_list (col_indices, global_cols);
+
+  const size_type n_actual_row_dofs = global_rows.size();
+  const size_type n_actual_col_dofs = global_cols.size();
+
+  // create arrays for the column data (indices and values) that will then be
+  // written into the matrix. Shortcut for deal.II sparse matrix
+  std::vector<size_type> &cols = scratch_data->columns;
+  std::vector<number>    &vals = scratch_data->values;
+  cols.resize(n_actual_col_dofs);
+  vals.resize(n_actual_col_dofs);
+
+  // now do the actual job.
+  for (size_type i=0; i<n_actual_row_dofs; ++i)
+    {
+      const size_type row = global_rows.global_row(i);
+
+      // calculate all the data that will be written into the matrix row.
+      size_type *col_ptr = &cols[0];
+      number    *val_ptr = &vals[0];
+      internals::resolve_matrix_row (global_rows, global_cols, i, 0,
+                                     n_actual_col_dofs,
+                                     local_matrix, col_ptr, val_ptr);
+      const size_type n_values = col_ptr - &cols[0];
+      if (n_values > 0)
+        global_matrix.add(row, n_values, &cols[0], &vals[0], false, true);
+    }
+}
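+
+// This variant is useful for rectangular couplings where rows and columns
+// are constrained by different objects; a sketch (names assumed):
+//
+//   row_constraints.distribute_local_to_global (local_coupling_matrix,
+//                                               row_dof_indices,
+//                                               col_constraints,
+//                                               col_dof_indices,
+//                                               coupling_matrix);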
+
+
+
+template <typename SparsityPatternType>
+void
+AffineConstraints::
+add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+                             SparsityPatternType          &sparsity_pattern,
+                             const bool                    keep_constrained_entries,
+                             const Table<2,bool>          &dof_mask,
+                             std::integral_constant<bool, false> ) const
+{
+  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
+
+  const size_type n_local_dofs = local_dof_indices.size();
+  bool dof_mask_is_active = false;
+  if (dof_mask.n_rows() == n_local_dofs)
+    {
+      dof_mask_is_active = true;
+      AssertDimension (dof_mask.n_cols(), n_local_dofs);
+    }
+
+  internals::AffineConstraintsData<double>::ScratchDataAccessor scratch_data;
+
+  // if the dof mask is not active, all we have to do is add some indices
+  // in a matrix format. To do this, we first create an array of all the
+  // indices that are to be added. these indices are the local dof indices
+  // plus some indices that come from constraints.
+  if (dof_mask_is_active == false)
+    {
+      std::vector<size_type> &actual_dof_indices = scratch_data->columns;
+      actual_dof_indices.resize(n_local_dofs);
+      make_sorted_row_list (local_dof_indices, actual_dof_indices);
+      const size_type n_actual_dofs = actual_dof_indices.size();
+
+      // now add the indices we collected above to the sparsity pattern. Very
+      // easy here - just add the same array to all the rows...
+      for (size_type i=0; i<n_actual_dofs; ++i)
+        sparsity_pattern.add_entries(actual_dof_indices[i],
+                                     actual_dof_indices.begin(),
+                                     actual_dof_indices.end(),
+                                     true);
+
+      // we need to add the whole row and column structure in case we keep
+      // constrained entries. Unfortunately, we can't use the nice matrix
+      // structure we use elsewhere, so manually add those indices one by one.
+      for (size_type i=0; i<n_local_dofs; i++)
+        if (is_constrained(local_dof_indices[i]))
+          {
+            if (keep_constrained_entries == true)
+              for (size_type j=0; j<n_local_dofs; j++)
+                {
+                  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
+                  sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
+                }
+            else
+              sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]);
+          }
+
+      return;
+    }
+
+
+  // complicated case: we need to filter out some indices. the function then
+  // becomes similar to the one for distributing matrix entries; see there
+  // for additional comments.
+  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
+  global_rows.reinit(n_local_dofs);
+  make_sorted_row_list (local_dof_indices, global_rows);
+  const size_type n_actual_dofs = global_rows.size();
+
+  // create arrays for the column indices that will then be written into the
+  // sparsity pattern.
+  std::vector<size_type> &cols = scratch_data->columns;
+  cols.resize(n_actual_dofs);
+
+  for (size_type i=0; i<n_actual_dofs; ++i)
+    {
+      std::vector<size_type>::iterator col_ptr = cols.begin();
+      const size_type row = global_rows.global_row(i);
+      internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
+                                     dof_mask, col_ptr);
+
+      // finally, write all the column indices that accumulated for the
+      // given row into the sparsity pattern
+      if (col_ptr != cols.begin())
+        sparsity_pattern.add_entries(row, cols.begin(), col_ptr,
+                                     true);
+    }
+  internals::set_sparsity_diagonals (global_rows, local_dof_indices,
+                                     dof_mask, keep_constrained_entries,
+                                     sparsity_pattern);
+}
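+
+// A sketch of how this is typically driven when building a sparsity
+// pattern (names assumed):
+//
+//   DynamicSparsityPattern dsp (dof_handler.n_dofs());
+//   for (const auto &cell : dof_handler.active_cell_iterators())
+//     {
+//       cell->get_dof_indices (local_dof_indices);
+//       constraints.add_entries_local_to_global (local_dof_indices, dsp,
+//                                                false /*keep_constrained*/);
+//     }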
+
+
+
+
+template <typename SparsityPatternType>
+void
+AffineConstraints::
+add_entries_local_to_global (const std::vector<size_type> &row_indices,
+                             const std::vector<size_type> &col_indices,
+                             SparsityPatternType          &sparsity_pattern,
+                             const bool                    keep_constrained_entries,
+                             const Table<2,bool>          &dof_mask) const
+{
+  const size_type n_local_rows = row_indices.size();
+  const size_type n_local_cols = col_indices.size();
+  bool dof_mask_is_active = false;
+  if (dof_mask.n_rows() == n_local_rows && dof_mask.n_cols() == n_local_cols)
+    dof_mask_is_active = true;
+
+  // if constrained entries should be kept, need to add rows and columns of
+  // those to the sparsity pattern
+  if (keep_constrained_entries == true)
+    {
+      for (size_type i=0; i<row_indices.size(); i++)
+        if (is_constrained(row_indices[i]))
+          for (size_type j=0; j<col_indices.size(); j++)
+            sparsity_pattern.add (row_indices[i], col_indices[j]);
+      for (size_type i=0; i<col_indices.size(); i++)
+        if (is_constrained(col_indices[i]))
+          for (size_type j=0; j<row_indices.size(); j++)
+            sparsity_pattern.add (row_indices[j], col_indices[i]);
+    }
+
+  // if the dof mask is not active, all we have to do is add some indices
+  // in a matrix format. To do this, we first create an array of all the
+  // indices that are to be added. these indices are the local dof indices
+  // plus some indices that come from constraints.
+  if (dof_mask_is_active == false)
+    {
+      std::vector<size_type> actual_row_indices (n_local_rows);
+      std::vector<size_type> actual_col_indices (n_local_cols);
+      make_sorted_row_list (row_indices, actual_row_indices);
+      make_sorted_row_list (col_indices, actual_col_indices);
+      const size_type n_actual_rows = actual_row_indices.size();
+
+      // now add the indices we collected above to the sparsity pattern. Very
+      // easy here - just add the same array to all the rows...
+      for (size_type i=0; i<n_actual_rows; ++i)
+        sparsity_pattern.add_entries(actual_row_indices[i],
+                                     actual_col_indices.begin(),
+                                     actual_col_indices.end(),
+                                     true);
+      return;
+    }
+
+
+  // TODO: implement this
+  Assert (false, ExcNotImplemented());
+}
+
+
+
+
+template <typename SparsityPatternType>
+void
+AffineConstraints::
+add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
+                             SparsityPatternType          &sparsity_pattern,
+                             const bool                    keep_constrained_entries,
+                             const Table<2,bool>          &dof_mask,
+                             std::integral_constant<bool, true> ) const
+{
+  // just as the other add_entries_local_to_global function, but now
+  // specialized for block matrices.
+  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
+  Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(),
+          ExcNotQuadratic());
+
+  const size_type n_local_dofs = local_dof_indices.size();
+  const size_type num_blocks = sparsity_pattern.n_block_rows();
+
+  internals::AffineConstraintsData<double>::ScratchDataAccessor scratch_data;
+
+  bool dof_mask_is_active = false;
+  if (dof_mask.n_rows() == n_local_dofs)
+    {
+      dof_mask_is_active = true;
+      AssertDimension (dof_mask.n_cols(), n_local_dofs);
+    }
+
+  if (dof_mask_is_active == false)
+    {
+      std::vector<size_type> &actual_dof_indices = scratch_data->columns;
+      actual_dof_indices.resize(n_local_dofs);
+      make_sorted_row_list (local_dof_indices, actual_dof_indices);
+      const size_type n_actual_dofs = actual_dof_indices.size();
+      (void)n_actual_dofs;
+
+      // additional construct that also takes care of block indices.
+      std::vector<size_type> &block_starts = scratch_data->block_starts;
+      block_starts.resize(num_blocks+1);
+      internals::make_block_starts (sparsity_pattern, actual_dof_indices,
+                                    block_starts);
+
+      for (size_type block=0; block<num_blocks; ++block)
+        {
+          const size_type next_block = block_starts[block+1];
+          for (size_type i=block_starts[block]; i<next_block; ++i)
+            {
+              Assert (i<n_actual_dofs, ExcInternalError());
+              const size_type row = actual_dof_indices[i];
+              Assert (row < sparsity_pattern.block(block,0).n_rows(),
+                      ExcInternalError());
+              std::vector<size_type>::iterator index_it = actual_dof_indices.begin();
+              for (size_type block_col = 0; block_col<num_blocks; ++block_col)
+                {
+                  const size_type next_block_col = block_starts[block_col+1];
+                  sparsity_pattern.block(block,block_col).
+                  add_entries(row,
+                              index_it,
+                              actual_dof_indices.begin() + next_block_col,
+                              true);
+                  index_it = actual_dof_indices.begin() + next_block_col;
+                }
+            }
+        }
+
+      for (size_type i=0; i<n_local_dofs; i++)
+        if (is_constrained(local_dof_indices[i]))
+          {
+            if (keep_constrained_entries == true)
+              for (size_type j=0; j<n_local_dofs; j++)
+                {
+                  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
+                  sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
+                }
+            else
+              sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]);
+          }
+
+      return;
+    }
+
+  // difficult case with dof_mask, similar to the distribute_local_to_global
+  // function for block matrices
+  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
+  global_rows.reinit(n_local_dofs);
+  make_sorted_row_list (local_dof_indices, global_rows);
+  const size_type n_actual_dofs = global_rows.size();
+
+  // additional construct that also takes care of block indices.
+  std::vector<size_type> &block_starts = scratch_data->block_starts;
+  block_starts.resize(num_blocks+1);
+  internals::make_block_starts(sparsity_pattern, global_rows, block_starts);
+
+  std::vector<size_type> &cols = scratch_data->columns;
+  cols.resize(n_actual_dofs);
+
+  // the basic difference to the non-block variant from now onwards is that we
+  // go through the blocks of the matrix separately.
+  for (size_type block=0; block<num_blocks; ++block)
+    {
+      const size_type next_block = block_starts[block+1];
+      for (size_type i=block_starts[block]; i<next_block; ++i)
+        {
+          const size_type row = global_rows.global_row(i);
+          for (size_type block_col=0; block_col<num_blocks; ++block_col)
+            {
+              const size_type begin_block = block_starts[block_col],
+                              end_block = block_starts[block_col+1];
+              std::vector<size_type>::iterator col_ptr = cols.begin();
+              internals::resolve_matrix_row (global_rows, i, begin_block,
+                                             end_block, dof_mask, col_ptr);
+
+              sparsity_pattern.block(block, block_col).add_entries(row,
+                                                                   cols.begin(),
+                                                                   col_ptr,
+                                                                   true);
+            }
+        }
+    }
+
+  internals::set_sparsity_diagonals (global_rows, local_dof_indices,
+                                     dof_mask, keep_constrained_entries,
+                                     sparsity_pattern);
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
index 15a7e8b96a45e44bbf6e7cdbd0b7f89354c43079..c953d5f7b4683c8db4e392837aecf98a6810fa0f 100644 (file)
@@ -16,6 +16,7 @@
 INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
 
 SET(_unity_include_src
+  affine_constraints.cc
   block_matrix_array.cc
   block_sparse_matrix.cc
   block_sparse_matrix_ez.cc
@@ -62,6 +63,7 @@ SET(_separate_src
   )
 
 SET(_inst
+  affine_constraints.inst.in
   block_sparse_matrix.inst.in
   block_vector.inst.in
   chunk_sparse_matrix.inst.in
diff --git a/source/lac/affine_constraints.cc b/source/lac/affine_constraints.cc
new file mode 100644 (file)
index 0000000..dc9f3a4
--- /dev/null
@@ -0,0 +1,1582 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1998 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/affine_constraints.templates.h>
+
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/block_sparse_matrix.h>
+#include <deal.II/lac/sparse_matrix_ez.h>
+#include <deal.II/lac/chunk_sparse_matrix.h>
+#include <deal.II/lac/block_sparse_matrix_ez.h>
+#include <deal.II/lac/la_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/matrix_block.h>
+#include <deal.II/lac/diagonal_matrix.h>
+
+#include <algorithm>
+#include <numeric>
+#include <set>
+#include <ostream>
+#include <boost/serialization/utility.hpp>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+// Static member variable
+const Table<2,bool> AffineConstraints::default_empty_table = Table<2,bool>();
+
+
+
+void
+AffineConstraints::copy_from (const AffineConstraints &other)
+{
+  lines       = other.lines;
+  lines_cache = other.lines_cache;
+  local_lines = other.local_lines;
+  sorted      = other.sorted;
+}
+
+
+
+bool
+AffineConstraints::check_zero_weight (const std::pair<size_type, double> &p)
+{
+  return (p.second == 0);
+}
+
+
+
+bool
+AffineConstraints::ConstraintLine::operator < (const ConstraintLine &a) const
+{
+  return index < a.index;
+}
+
+
+
+bool
+AffineConstraints::ConstraintLine::operator == (const ConstraintLine &a) const
+{
+  return index == a.index;
+}
+
+
+
+std::size_t
+AffineConstraints::ConstraintLine::memory_consumption () const
+{
+  return (MemoryConsumption::memory_consumption (index) +
+          MemoryConsumption::memory_consumption (entries) +
+          MemoryConsumption::memory_consumption (inhomogeneity));
+}
+
+
+
+const AffineConstraints::LineRange AffineConstraints::get_lines() const
+{
+  return boost::make_iterator_range(lines.begin(), lines.end());
+}
+
+
+
+bool AffineConstraints::is_consistent_in_parallel(const std::vector<IndexSet> &locally_owned_dofs,
+                                                  const IndexSet &locally_active_dofs,
+                                                  const MPI_Comm mpi_communicator,
+                                                  const bool verbose) const
+{
+  ConstraintLine empty;
+  empty.inhomogeneity = 0.0;
+
+  // Helper to return a reference to the ConstraintLine object that belongs to row @p row.
+  // Since we do not want to make copies, we need the empty object stored above so that
+  // we can return a reference even for unconstrained rows.
+  auto get_line = [&] (const size_type row) -> const ConstraintLine&
+  {
+    const size_type line_index = calculate_line_index(row);
+    if (line_index >= lines_cache.size() ||
+        lines_cache[line_index] == numbers::invalid_size_type)
+      {
+        empty.index = row;
+        return empty;
+      }
+    else
+      return lines[lines_cache[line_index]];
+  };
+
+  // identify non-owned rows and send to owner:
+  std::map< unsigned int, std::vector<ConstraintLine> > to_send;
+
+  const unsigned int myid = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
+  const unsigned int nproc = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+  // We will send all locally active dofs that are not locally owned for checking. Note
+  // that we allow constraints to differ on locally_relevant (and not active) DoFs.
+  IndexSet non_owned = locally_active_dofs;
+  non_owned.subtract_set(locally_owned_dofs[myid]);
+  for (unsigned int owner=0; owner<nproc; ++owner)
+    {
+      // find all lines to send to @p owner
+      IndexSet indices_to_send = non_owned & locally_owned_dofs[owner];
+      for (const auto &row_idx : indices_to_send)
+        {
+          to_send[owner].push_back(get_line(row_idx));
+        }
+    }
+
+  std::map<unsigned int, std::vector<ConstraintLine> > received = Utilities::MPI::some_to_some (mpi_communicator, to_send);
+
+  unsigned int inconsistent = 0;
+
+  // from each processor:
+  for (const auto &kv : received)
+    {
+      // for each incoming line:
+      for (auto &lineit : kv.second)
+        {
+          const ConstraintLine &reference = get_line(lineit.index);
+
+          if (lineit.inhomogeneity != reference.inhomogeneity)
+            {
+              ++inconsistent;
+
+              if (verbose)
+                std::cout << "Proc " << myid
+                          << " got line " << lineit.index
+                          << " from " << kv.first
+                          << " inhomogeneity " << lineit.inhomogeneity << " != " << reference.inhomogeneity << std::endl;
+            }
+          else if (lineit.entries != reference.entries)
+            {
+              ++inconsistent;
+              if (verbose)
+                std::cout << "Proc " << myid
+                          << " got line " << lineit.index
+                          << " from " << kv.first
+                          << " wrong values!"
+                          << std::endl;
+            }
+        }
+    }
+
+  const unsigned int total = Utilities::MPI::sum(inconsistent, mpi_communicator);
+  if (verbose && total>0 && myid==0)
+    std::cout << total << " inconsistent lines discovered!" << std::endl;
+  return total==0;
+}
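+
+// A sketch of a typical debug-mode check (all names assumed): gather the
+// locally owned index sets of all ranks into a vector, then verify
+//
+//   Assert (constraints.is_consistent_in_parallel (locally_owned_dofs_per_proc,
+//                                                  locally_active_dofs,
+//                                                  mpi_communicator),
+//           ExcInternalError());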
+
+
+
+void
+AffineConstraints::add_lines (const std::set<size_type> &lines)
+{
+  for (std::set<size_type>::const_iterator
+       i = lines.begin(); i != lines.end(); ++i)
+    add_line (*i);
+}
+
+
+
+void
+AffineConstraints::add_lines (const std::vector<bool> &lines)
+{
+  for (size_type i=0; i<lines.size(); ++i)
+    if (lines[i] == true)
+      add_line (i);
+}
+
+
+
+void
+AffineConstraints::add_lines (const IndexSet &lines)
+{
+  for (size_type i=0; i<lines.n_elements(); ++i)
+    add_line (lines.nth_index_in_set(i));
+}
+
+
+
+void
+AffineConstraints::add_entries
+(const size_type                                  line,
+ const std::vector<std::pair<size_type,double> > &col_val_pairs)
+{
+  Assert (sorted==false, ExcMatrixIsClosed());
+  Assert (is_constrained(line), ExcLineInexistant(line));
+
+  ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
+  Assert (line_ptr->index == line, ExcInternalError());
+
+  // if in debug mode, check whether an entry for this column already
+  // exists and if it is the same as the one entered at present
+  //
+  // in any case: skip this entry if an entry for this column already
+  // exists, since we don't want to enter it twice
+  for (std::vector<std::pair<size_type,double> >::const_iterator
+       col_val_pair = col_val_pairs.begin();
+       col_val_pair!=col_val_pairs.end(); ++col_val_pair)
+    {
+      Assert (line != col_val_pair->first,
+              ExcMessage ("Can't constrain a degree of freedom to itself"));
+
+      for (ConstraintLine::Entries::const_iterator
+           p=line_ptr->entries.begin();
+           p != line_ptr->entries.end(); ++p)
+        if (p->first == col_val_pair->first)
+          {
+            // entry exists, break innermost loop
+            Assert (p->second == col_val_pair->second,
+                    ExcEntryAlreadyExists(line, col_val_pair->first,
+                                          p->second, col_val_pair->second));
+            break;
+          }
+
+      line_ptr->entries.push_back (*col_val_pair);
+    }
+}
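+
+// For illustration, the inhomogeneous constraint x_3 = 0.5 x_1 + 0.5 x_2 + 1
+// would be built up as
+//
+//   constraints.add_line (3);
+//   constraints.add_entries (3, {{1, 0.5}, {2, 0.5}});
+//   constraints.set_inhomogeneity (3, 1.0);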
+
+
+
+void AffineConstraints::add_selected_constraints(
+  const AffineConstraints &constraints,
+  const IndexSet          &filter)
+{
+  if (constraints.n_constraints() == 0)
+    return;
+
+  Assert (filter.size() > constraints.lines.back().index,
+          ExcMessage ("Filter needs to be larger than constraint matrix size."));
+  for (std::vector<ConstraintLine>::const_iterator line=constraints.lines.begin();
+       line!=constraints.lines.end(); ++line)
+    if (filter.is_element(line->index))
+      {
+        const size_type row = filter.index_within_set (line->index);
+        add_line (row);
+        set_inhomogeneity (row, line->inhomogeneity);
+        for (size_type i=0; i<line->entries.size(); ++i)
+          if (filter.is_element(line->entries[i].first))
+            add_entry (row, filter.index_within_set (line->entries[i].first),
+                       line->entries[i].second);
+      }
+}
+
+
+
+void AffineConstraints::close ()
+{
+  if (sorted == true)
+    return;
+
+  // sort the lines
+  std::sort (lines.begin(), lines.end());
+
+  // update list of pointers and give the vector a sharp size since we
+  // won't modify the size any more after this point.
+  {
+    std::vector<size_type> new_lines (lines_cache.size(),
+                                      numbers::invalid_size_type);
+    size_type counter = 0;
+    for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
+         line!=lines.end(); ++line, ++counter)
+      new_lines[calculate_line_index(line->index)] = counter;
+    std::swap (lines_cache, new_lines);
+  }
+
+  // in debug mode: check whether we really set the pointers correctly.
+  for (size_type i=0; i<lines_cache.size(); ++i)
+    if (lines_cache[i] != numbers::invalid_size_type)
+      Assert (i == calculate_line_index(lines[lines_cache[i]].index),
+              ExcInternalError());
+
+  // first, strip zero entries, as we have to do that only once
+  for (std::vector<ConstraintLine>::iterator line = lines.begin();
+       line!=lines.end(); ++line)
+    // first remove zero entries. that would mean that in the linear
+    // constraint for a node, x_i = ax_1 + bx_2 + ..., another node times 0
+    // appears. obviously, 0*something can be omitted
+    line->entries.erase (std::remove_if (line->entries.begin(),
+                                         line->entries.end(),
+                                         &check_zero_weight),
+                         line->entries.end());
+
+
+
+#ifdef DEBUG
+  // In debug mode we are computing an estimate for the maximum number
+  // of constraints so that we can bail out if there is a cycle in the
+  // constraints (which is easier than searching for cycles in the graph).
+  //
+  // Let us figure out the largest dof index. This is an upper bound for the
+  // number of constraints because it is an approximation for the number of dofs
+  // in our system.
+  size_type largest_idx = 0;
+  for (std::vector<ConstraintLine>::iterator line = lines.begin();
+       line!=lines.end(); ++line)
+    {
+      for (ConstraintLine::Entries::iterator it = line->entries.begin(); it!=line->entries.end(); ++it)
+        {
+          largest_idx=std::max(largest_idx, it->first);
+        }
+    }
+#endif
+
+  // replace references to dofs that are themselves constrained. note that
+  // because we may replace references to other dofs that may themselves be
+  // constrained to third ones, we have to iterate over all this until we
+  // replace no chains of constraints any more
+  //
+  // the iteration replaces references to constrained degrees of freedom by
+  // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
+  // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appear
+  // twice. we will throw this duplicate out in the following step, where
+  // we sort the list so that throwing out duplicates becomes much more
+  // efficient. also, we have to do it only once, rather than in each
+  // iteration
+  size_type iteration = 0;
+  while (true)
+    {
+      bool chained_constraint_replaced = false;
+
+      for (std::vector<ConstraintLine>::iterator line = lines.begin();
+           line!=lines.end(); ++line)
+        {
+#ifdef DEBUG
+          // we need to keep track of how many replacements we do in this line, because we can
+          // end up in a cycle A->B->C->A without the number of entries growing.
+          size_type n_replacements = 0;
+#endif
+
+          // loop over all entries of this line (including ones that we
+          // have appended in this go around) and see whether they are
+          // further constrained. ignore elements that we don't store on
+          // the current processor
+          size_type entry = 0;
+          while (entry < line->entries.size())
+            if (((local_lines.size() == 0)
+                 ||
+                 (local_lines.is_element(line->entries[entry].first)))
+                &&
+                is_constrained (line->entries[entry].first))
+              {
+                // ok, this entry is further constrained:
+                chained_constraint_replaced = true;
+
+                // look up the chain of constraints for this entry
+                const size_type  dof_index = line->entries[entry].first;
+                const double     weight = line->entries[entry].second;
+
+                Assert (dof_index != line->index,
+                        ExcMessage ("Cycle in constraints detected!"));
+
+                const ConstraintLine *constrained_line =
+                  &lines[lines_cache[calculate_line_index(dof_index)]];
+                Assert (constrained_line->index == dof_index,
+                        ExcInternalError());
+
+                // now we have to replace an entry by its expansion. we do
+                // that by overwriting the entry by the first entry of the
+                // expansion and adding the remaining ones to the end,
+                // where we will later process them once more
+                //
+                // we can of course only do that if the DoF that we are
+                // currently handling is constrained by a linear combination
+                // of other dofs:
+                if (constrained_line->entries.size() > 0)
+                  {
+                    for (size_type i=0; i<constrained_line->entries.size(); ++i)
+                      Assert (dof_index != constrained_line->entries[i].first,
+                              ExcMessage ("Cycle in constraints detected!"));
+
+                    // replace first entry, then tack the rest to the end
+                    // of the list
+                    line->entries[entry] =
+                      std::make_pair (constrained_line->entries[0].first,
+                                      constrained_line->entries[0].second *
+                                      weight);
+
+                    for (size_type i=1; i<constrained_line->entries.size(); ++i)
+                      line->entries.emplace_back (constrained_line->entries[i].first,
+                                                  constrained_line->entries[i].second
+                                                  * weight);
+
+#ifdef DEBUG
+                    // keep track of how many entries we replace in this
+                    // line. If we do more than there are constraints or
+                    // dofs in our system, we must have a cycle.
+                    ++n_replacements;
+                    Assert(n_replacements/2<largest_idx, ExcMessage("Cycle in constraints detected!"));
+                    if (n_replacements/2>=largest_idx)
+                      return; // this enables us to test for this Exception.
+#endif
+                  }
+                else
+                  // the DoF that we encountered is not constrained by a
+                  // linear combination of other dofs but is equal to just
+                  // the inhomogeneity (i.e. its chain of entries is
+                  // empty). in that case, we can't just overwrite the
+                  // current entry, but we have to actually eliminate it
+                  {
+                    line->entries.erase (line->entries.begin()+entry);
+                  }
+
+                line->inhomogeneity += constrained_line->inhomogeneity *
+                                       weight;
+
+                // now that we're here, do not increase index by one but
+                // rather make another pass for the present entry because
+                // we have replaced the present entry by another one, or
+                // because we have deleted it and shifted all following
+                // ones one forward
+              }
+            else
+              // entry not further constrained. just move ahead by one
+              ++entry;
+        }
+
+      // if we didn't do anything in this round, then quit the loop
+      if (chained_constraint_replaced == false)
+        break;
+
+      // increase iteration count. note that we should not iterate more
+      // times than there are constraints, since this puts a natural upper
+      // bound on the length of constraint chains
+      ++iteration;
+      Assert (iteration <= lines.size(), ExcInternalError());
+    }
+
+  // finally sort the entries and re-scale them if necessary. in this step,
+  // we also throw out duplicates as mentioned above. moreover, since some
+  // entries with zero weight may have been removed, we replace the entry
+  // lists by vectors with exactly the right size.
+  for (std::vector<ConstraintLine>::iterator line = lines.begin();
+       line!=lines.end(); ++line)
+    {
+      std::sort (line->entries.begin(), line->entries.end());
+
+      // loop over the now sorted list and see whether any of the entries
+      // references the same dofs more than once in order to find how many
+      // non-duplicate entries we have. This lets us allocate the correct
+      // amount of memory for the constraint entries.
+      size_type duplicates = 0;
+      for (size_type i=1; i<line->entries.size(); ++i)
+        if (line->entries[i].first == line->entries[i-1].first)
+          duplicates++;
+
+      if (duplicates > 0 || line->entries.size() < line->entries.capacity())
+        {
+          ConstraintLine::Entries new_entries;
+
+          // if we have no duplicates, copy the entries verbatim. this way,
+          // the final size of the vector is correct.
+          if (duplicates == 0)
+            new_entries = line->entries;
+          else
+            {
+              // otherwise, we need to go through the list one by one and
+              // resolve the duplicates
+              new_entries.reserve (line->entries.size() - duplicates);
+              new_entries.push_back(line->entries[0]);
+              for (size_type j=1; j<line->entries.size(); ++j)
+                if (line->entries[j].first == line->entries[j-1].first)
+                  {
+                    Assert (new_entries.back().first == line->entries[j].first,
+                            ExcInternalError());
+                    new_entries.back().second += line->entries[j].second;
+                  }
+                else
+                  new_entries.push_back (line->entries[j]);
+
+              Assert (new_entries.size() == line->entries.size() - duplicates,
+                      ExcInternalError());
+
+              // make sure there are really no duplicates left and that the
+              // list is still sorted
+              for (size_type j=1; j<new_entries.size(); ++j)
+                {
+                  Assert (new_entries[j].first != new_entries[j-1].first,
+                          ExcInternalError());
+                  Assert (new_entries[j].first > new_entries[j-1].first,
+                          ExcInternalError());
+                }
+            }
+
+          // replace old list of constraints for this dof by the new one
+          line->entries.swap (new_entries);
+        }
+
+      // finally do the following check: if the sum of weights for the
+      // constraints is close to one, but not exactly one, then rescale all
+      // the weights so that they sum up to 1. this adds a little numerical
+      // stability and avoids all sorts of problems where the actual value
+      // is close to, but not quite what we expected
+      //
+      // the case where the weights don't quite sum up happens when we
+      // compute the interpolation weights "on the fly", i.e. not from
+      // precomputed tables. in this case, the interpolation weights are
+      // also subject to round-off
+      double sum = 0;
+      for (size_type i=0; i<line->entries.size(); ++i)
+        sum += line->entries[i].second;
+      if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
+        {
+          for (size_type i=0; i<line->entries.size(); ++i)
+            line->entries[i].second /= sum;
+          line->inhomogeneity /= sum;
+        }
+    } // end of loop over all constraint lines
+
+#ifdef DEBUG
+  // if in debug mode: check that no dof is constrained to another dof that
+  // is also constrained. exclude dofs from this check whose constraint
+  // lines are not stored on the local processor
+  for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
+       line!=lines.end(); ++line)
+    for (ConstraintLine::Entries::const_iterator
+         entry=line->entries.begin();
+         entry!=line->entries.end(); ++entry)
+      if ((local_lines.size() == 0)
+          ||
+          (local_lines.is_element(entry->first)))
+        {
+          // make sure that entry->first is not the index of a line itself
+          const bool is_circle = is_constrained(entry->first);
+          Assert (is_circle == false,
+                  ExcDoFConstrainedToConstrainedDoF(line->index, entry->first));
+        }
+#endif
+
+  sorted = true;
+}
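+
+// As a worked example of what close() does: starting from
+//   x_3 = 0.5 x_2 + 0.5 x_0   and   x_2 = 0.5 x_0 + 0.5 x_1,
+// the chain resolution first yields x_3 = 0.5 x_0 + 0.25 x_0 + 0.25 x_1,
+// and the sort-and-merge step then collapses the duplicate into the final
+// constraint x_3 = 0.75 x_0 + 0.25 x_1.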
+
+
+
+void
+AffineConstraints::merge (const AffineConstraints &other_constraints,
+                          const MergeConflictBehavior merge_conflict_behavior,
+                          const bool allow_different_local_lines)
+{
+  (void) allow_different_local_lines;
+  Assert(allow_different_local_lines ||
+         local_lines == other_constraints.local_lines,
+         ExcMessage("local_lines for this and the other objects are not the same "
+                    "although allow_different_local_lines is false."));
+
+  // store the previous state with respect to sorting
+  const bool object_was_sorted = sorted;
+  sorted = false;
+
+  // first action is to fold into the present object possible constraints
+  // in the second object. we don't strictly need to do this any more since
+  // the AffineConstraints container has learned to deal with chains of
+  // constraints in the close() function, but we have traditionally done
+  // this and it's not overly hard to do.
+  //
+  // for this, loop over all constraints and replace the constraint lines
+  // with a new one where constraints are replaced if necessary.
+  ConstraintLine::Entries tmp;
+  for (std::vector<ConstraintLine>::iterator line=lines.begin();
+       line!=lines.end(); ++line)
+    {
+      tmp.clear ();
+      for (size_type i=0; i<line->entries.size(); ++i)
+        {
+          // if the present dof is not stored, or not constrained, or if we won't take the
+          // constraint from the other object, then simply copy it over
+          if ((other_constraints.local_lines.size() != 0
+               && other_constraints.local_lines.is_element(line->entries[i].first) == false)
+              ||
+              other_constraints.is_constrained(line->entries[i].first) == false
+              ||
+              ((merge_conflict_behavior != right_object_wins)
+               && other_constraints.is_constrained(line->entries[i].first)
+               && this->is_constrained(line->entries[i].first)))
+            tmp.push_back(line->entries[i]);
+          else
+            // otherwise resolve further constraints by replacing the old
+            // entry by a sequence of new entries taken from the other
+            // object, but with multiplied weights
+            {
+              const ConstraintLine::Entries *other_line
+                = other_constraints.get_constraint_entries (line->entries[i].first);
+              Assert (other_line != nullptr,
+                      ExcInternalError());
+
+              const double weight = line->entries[i].second;
+
+              for (ConstraintLine::Entries::const_iterator j=other_line->begin();
+                   j!=other_line->end(); ++j)
+                tmp.emplace_back(j->first, j->second*weight);
+
+              line->inhomogeneity
+              += other_constraints.get_inhomogeneity(line->entries[i].first) *
+                 weight;
+            }
+        }
+      // finally exchange old and newly resolved line
+      line->entries.swap (tmp);
+    }
+
+  if (local_lines.size() != 0)
+    local_lines.add_indices(other_constraints.local_lines);
+
+  {
+    // do not bother to resize the lines cache exactly since it is pretty
+    // cheap to adjust it along the way.
+    std::fill(lines_cache.begin(), lines_cache.end(), numbers::invalid_size_type);
+
+    // reset lines_cache for our own constraints
+    size_type index = 0;
+    for (std::vector<ConstraintLine>::const_iterator line = lines.begin();
+         line != lines.end(); ++line)
+      {
+        size_type local_line_no = calculate_line_index(line->index);
+        if (local_line_no >= lines_cache.size())
+          lines_cache.resize(local_line_no+1, numbers::invalid_size_type);
+        lines_cache[local_line_no] = index++;
+      }
+
+    // Add other_constraints to lines cache and our list of constraints
+    for (std::vector<ConstraintLine>::const_iterator line = other_constraints.lines.begin();
+         line != other_constraints.lines.end(); ++line)
+      {
+        const size_type local_line_no = calculate_line_index(line->index);
+        if (local_line_no >= lines_cache.size())
+          {
+            lines_cache.resize(local_line_no+1, numbers::invalid_size_type);
+            lines.push_back(*line);
+            lines_cache[local_line_no] = index++;
+          }
+        else if (lines_cache[local_line_no] == numbers::invalid_size_type)
+          {
+            // there are no constraints for that line yet
+            lines.push_back(*line);
+            AssertIndexRange(local_line_no, lines_cache.size());
+            lines_cache[local_line_no] = index++;
+          }
+        else
+          {
+            // we already store that line
+            switch (merge_conflict_behavior)
+              {
+              case no_conflicts_allowed:
+                AssertThrow (false,
+                             ExcDoFIsConstrainedFromBothObjects (line->index));
+                break;
+
+              case left_object_wins:
+                // ignore this constraint
+                break;
+
+              case right_object_wins:
+                AssertIndexRange(local_line_no, lines_cache.size());
+                lines[lines_cache[local_line_no]] = *line;
+                break;
+
+              default:
+                Assert (false, ExcNotImplemented());
+              }
+          }
+      }
+
+    // check that we set the pointers correctly
+    for (size_type i=0; i<lines_cache.size(); ++i)
+      if (lines_cache[i] != numbers::invalid_size_type)
+        Assert (i == calculate_line_index(lines[lines_cache[i]].index),
+                ExcInternalError());
+  }
+
+  // if the object was sorted before, then make sure it is so afterward as
+  // well. otherwise leave everything in the unsorted state
+  if (object_was_sorted == true)
+    close ();
+}
+
+
+
+void AffineConstraints::shift (const size_type offset)
+{
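+  // the effect of the shift is that a constraint
+  //   x_i = w_1*x_{j_1} + ... + w_M*x_{j_M} + b
+  // becomes
+  //   x_{i+offset} = w_1*x_{j_1+offset} + ... + w_M*x_{j_M+offset} + b,
+  // i.e. all indices move while weights and inhomogeneities stay put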
+  if (local_lines.size() == 0)
+    lines_cache.insert (lines_cache.begin(), offset,
+                        numbers::invalid_size_type);
+  else
+    {
+      // shift local_lines
+      IndexSet new_local_lines(local_lines.size());
+      new_local_lines.add_indices(local_lines, offset);
+      std::swap(local_lines, new_local_lines);
+    }
+
+  for (std::vector<ConstraintLine>::iterator i = lines.begin();
+       i != lines.end(); ++i)
+    {
+      i->index += offset;
+      for (ConstraintLine::Entries::iterator
+           j = i->entries.begin();
+           j != i->entries.end(); ++j)
+        j->first += offset;
+    }
+
+#ifdef DEBUG
+  // make sure that lines, lines_cache and local_lines
+  // are still linked correctly
+  for (size_type i=0; i<lines_cache.size(); ++i)
+    Assert(lines_cache[i] == numbers::invalid_size_type ||
+           calculate_line_index(lines[lines_cache[i]].index) == i,
+           ExcInternalError());
+#endif
+}
+
+
+
+void AffineConstraints::clear ()
+{
+  {
+    std::vector<ConstraintLine> tmp;
+    lines.swap (tmp);
+  }
+
+  {
+    std::vector<size_type> tmp;
+    lines_cache.swap (tmp);
+  }
+
+  sorted = false;
+}
+
+
+
+void AffineConstraints::reinit (const IndexSet &local_constraints)
+{
+  local_lines = local_constraints;
+
+  // make sure the IndexSet is compressed. Otherwise this can lead to crashes
+  // that are hard to find (only happen in release mode).
+  // see tests/mpi/affine_constraints_crash_01
+  local_lines.compress();
+
+  clear();
+}
+
+
+
+void AffineConstraints::condense (SparsityPattern &sparsity) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
+  Assert (sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
+
+  // store for each index whether it must be distributed or not. if the
+  // entry is numbers::invalid_size_type, no distribution is necessary;
+  // otherwise, the number states which line in the constraint matrix
+  // handles this index
+  std::vector<size_type> distribute(sparsity.n_rows(),
+                                    numbers::invalid_size_type);
+
+  for (size_type c=0; c<lines.size(); ++c)
+    distribute[lines[c].index] = c;
+
+  const size_type n_rows = sparsity.n_rows();
+  for (size_type row=0; row<n_rows; ++row)
+    {
+      if (distribute[row] == numbers::invalid_size_type)
+        {
+          // regular line. loop over all valid cols. note that this
+          // changes the line we are presently working on: we add additional
+          // entries. these are put to the end of the row. however, as
+          // constrained nodes cannot be constrained to other constrained
+          // nodes, nothing will happen if we run into these added nodes, as
+          // they can't be distributed further. we might store the position of
+          // the last old entry and stop work there, but since operating on
+          // the newly added ones only takes two comparisons (column index
+          // valid, distribute[column] necessarily
+          // ==numbers::invalid_size_type), it is cheaper to not do so and
+          // run right until the end of the line
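+          //
+          // as a concrete (hypothetical) example: if column 7 is
+          // constrained as x7 = 0.5*x1 + 0.5*x4, an entry (row,7)
+          // encountered below adds the entries (row,1) and (row,4), so
+          // that eliminating x7 from the matrix later on cannot create
+          // entries outside the pattern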
+          for (SparsityPattern::iterator entry = sparsity.begin(row);
+               ((entry != sparsity.end(row)) &&
+                entry->is_valid_entry());
+               ++entry)
+            {
+              const size_type column = entry->column();
+
+              if (distribute[column] != numbers::invalid_size_type)
+                {
+                  // distribute entry at regular row @p{row} and irregular
+                  // column @p{column}
+                  for (size_type q=0;
+                       q!=lines[distribute[column]].entries.size();
+                       ++q)
+                    sparsity.add (row,
+                                  lines[distribute[column]].entries[q].first);
+                }
+            }
+        }
+      else
+        // row must be distributed. note that here the present row is not
+        // touched (unlike above)
+        {
+          for (SparsityPattern::iterator entry = sparsity.begin(row);
+               (entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry)
+            {
+              const size_type column = entry->column();
+              if (distribute[column] == numbers::invalid_size_type)
+                // distribute entry at irregular row @p{row} and regular
+                // column @p{column}
+                for (size_type q=0;
+                     q!=lines[distribute[row]].entries.size(); ++q)
+                  sparsity.add (lines[distribute[row]].entries[q].first,
+                                column);
+              else
+                // distribute entry at irregular row @p{row} and irregular
+                // column @p{column}
+                for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+                  for (size_type q=0;
+                       q!=lines[distribute[column]].entries.size(); ++q)
+                    sparsity.add (lines[distribute[row]].entries[p].first,
+                                  lines[distribute[column]].entries[q].first);
+            }
+        }
+    }
+
+  sparsity.compress();
+}
+
+
+
+
+void AffineConstraints::condense (DynamicSparsityPattern &sparsity) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sparsity.n_rows() == sparsity.n_cols(),
+          ExcNotQuadratic());
+
+  // store for each index whether it must be distributed or not. if the
+  // entry is numbers::invalid_size_type, no distribution is necessary;
+  // otherwise, the number states which line in the constraint matrix
+  // handles this index
+  std::vector<size_type> distribute(sparsity.n_rows(),
+                                    numbers::invalid_size_type);
+
+  for (size_type c=0; c<lines.size(); ++c)
+    distribute[lines[c].index] = c;
+
+  const size_type n_rows = sparsity.n_rows();
+  for (size_type row=0; row<n_rows; ++row)
+    {
+      if (distribute[row] == numbers::invalid_size_type)
+        // regular line. loop over cols. note that as we proceed to
+        // distribute cols, the loop may get longer
+        for (size_type j=0; j<sparsity.row_length(row); ++j)
+          {
+            const size_type column = sparsity.column_number(row,j);
+
+            if (distribute[column] != numbers::invalid_size_type)
+              {
+                // distribute entry at regular row @p{row} and irregular
+                // column @p{column}. note that this changes the line we are
+                // presently working on: we add additional entries. if we
+                // add another entry at a column behind the present one, we
+                // will encounter it later on (but since it can't be
+                // further constrained, won't have to do anything about
+                // it). if we add it up front of the present column, we
+                // will find the present column later on again as it was
+                // shifted back (again nothing happens, in particular no
+                // endless loop, as when we encounter it the second time we
+                // won't be able to add more entries as they all already
+                // exist, but we do the same work more often than
+                // necessary, and the loop gets longer), so move the cursor
+                // one to the right in the case that we add an entry up
+                // front that did not exist before. check whether it
+                // existed before by tracking the length of this row
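+                //
+                // with hypothetical numbers: if the row holds columns
+                // {2, 9}, j points at column 9, and x9 = 0.3*x1 + 0.7*x4,
+                // then adding the new columns 1 and 4 shifts column 9
+                // two places to the right, so j is incremented twice and
+                // keeps pointing at column 9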
+                size_type old_rowlength = sparsity.row_length(row);
+                for (size_type q=0;
+                     q!=lines[distribute[column]].entries.size();
+                     ++q)
+                  {
+                    const size_type
+                    new_col = lines[distribute[column]].entries[q].first;
+
+                    sparsity.add (row, new_col);
+
+                    const size_type new_rowlength = sparsity.row_length(row);
+                    if ((new_col < column) && (old_rowlength != new_rowlength))
+                      ++j;
+                    old_rowlength = new_rowlength;
+                  }
+              }
+          }
+      else
+        // row must be distributed
+        for (size_type j=0; j<sparsity.row_length(row); ++j)
+          {
+            const size_type column = sparsity.column_number(row,j);
+
+            if (distribute[column] == numbers::invalid_size_type)
+              // distribute entry at irregular row @p{row} and regular
+              // column @p{column}
+              for (size_type q=0;
+                   q!=lines[distribute[row]].entries.size(); ++q)
+                sparsity.add (lines[distribute[row]].entries[q].first,
+                              column);
+            else
+              // distribute entry at irregular row @p{row} and irregular
+              // column @p{column}
+              for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+                for (size_type q=0;
+                     q!=lines[distribute[sparsity.column_number(row,j)]]
+                     .entries.size(); ++q)
+                  sparsity.add (lines[distribute[row]].entries[p].first,
+                                lines[distribute[sparsity.column_number(row,j)]]
+                                .entries[q].first);
+          }
+    }
+}
+
+
+
+void AffineConstraints::condense (BlockSparsityPattern &sparsity) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
+  Assert (sparsity.n_rows() == sparsity.n_cols(),
+          ExcNotQuadratic());
+  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
+          ExcNotQuadratic());
+  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
+          ExcNotQuadratic());
+
+  const BlockIndices &
+  index_mapping = sparsity.get_column_indices();
+
+  const size_type n_blocks = sparsity.n_block_rows();
+
+  // store for each index whether it must be distributed or not. if the
+  // entry is numbers::invalid_size_type, no distribution is necessary;
+  // otherwise, the number states which line in the constraint matrix
+  // handles this index
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);
+
+  for (size_type c=0; c<lines.size(); ++c)
+    distribute[lines[c].index] = c;
+
+  const size_type n_rows = sparsity.n_rows();
+  for (size_type row=0; row<n_rows; ++row)
+    {
+      // get index of this row within the blocks
+      const std::pair<size_type,size_type>
+      block_index = index_mapping.global_to_local(row);
+      const size_type block_row = block_index.first;
+
+      if (distribute[row] == numbers::invalid_size_type)
+        // regular line. loop over all columns and see whether this column
+        // must be distributed
+        {
+
+          // to loop over all entries in this row, we have to loop over all
+          // blocks in this blockrow and the corresponding row therein
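+          // (e.g., with two blocks of sizes 10 and 15, global row 12
+          // corresponds to block row 1, local row 2)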
+          for (size_type block_col=0; block_col<n_blocks; ++block_col)
+            {
+              const SparsityPattern &
+              block_sparsity = sparsity.block(block_row, block_col);
+
+              for (SparsityPattern::const_iterator
+                   entry = block_sparsity.begin(block_index.second);
+                   (entry != block_sparsity.end(block_index.second)) &&
+                   entry->is_valid_entry();
+                   ++entry)
+                {
+                  const size_type global_col
+                    = index_mapping.local_to_global(block_col, entry->column());
+
+                  if (distribute[global_col] != numbers::invalid_size_type)
+                    // distribute entry at regular row @p{row} and
+                    // irregular column global_col
+                    {
+                      for (size_type q=0;
+                           q!=lines[distribute[global_col]].entries.size(); ++q)
+                        sparsity.add (row,
+                                      lines[distribute[global_col]].entries[q].first);
+                    }
+                }
+            }
+        }
+      else
+        {
+          // row must be distributed. split the whole row into the chunks
+          // defined by the blocks
+          for (size_type block_col=0; block_col<n_blocks; ++block_col)
+            {
+              const SparsityPattern &
+              block_sparsity = sparsity.block(block_row,block_col);
+
+              for (SparsityPattern::const_iterator
+                   entry = block_sparsity.begin(block_index.second);
+                   (entry != block_sparsity.end(block_index.second)) &&
+                   entry->is_valid_entry();
+                   ++entry)
+                {
+                  const size_type global_col
+                    = index_mapping.local_to_global (block_col, entry->column());
+
+                  if (distribute[global_col] == numbers::invalid_size_type)
+                    // distribute entry at irregular row @p{row} and
+                    // regular column global_col.
+                    {
+                      for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
+                        sparsity.add (lines[distribute[row]].entries[q].first, global_col);
+                    }
+                  else
+                    // distribute entry at irregular row @p{row} and
+                    // irregular column @p{global_col}
+                    {
+                      for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+                        for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+                          sparsity.add (lines[distribute[row]].entries[p].first,
+                                        lines[distribute[global_col]].entries[q].first);
+                    }
+                }
+            }
+        }
+    }
+
+  sparsity.compress();
+}
+
+
+
+
+void AffineConstraints::condense (BlockDynamicSparsityPattern &sparsity) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sparsity.n_rows() == sparsity.n_cols(),
+          ExcNotQuadratic());
+  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
+          ExcNotQuadratic());
+  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
+          ExcNotQuadratic());
+
+  const BlockIndices &
+  index_mapping = sparsity.get_column_indices();
+
+  const size_type n_blocks = sparsity.n_block_rows();
+
+  // store for each index whether it must be distributed or not. if the
+  // entry is numbers::invalid_size_type, no distribution is necessary;
+  // otherwise, the number states which line in the constraint matrix
+  // handles this index
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);
+
+  for (size_type c=0; c<lines.size(); ++c)
+    distribute[lines[c].index] = c;
+
+  const size_type n_rows = sparsity.n_rows();
+  for (size_type row=0; row<n_rows; ++row)
+    {
+      // get index of this row within the blocks
+      const std::pair<size_type,size_type>
+      block_index = index_mapping.global_to_local(row);
+      const size_type block_row = block_index.first;
+      const size_type local_row = block_index.second;
+
+      if (distribute[row] == numbers::invalid_size_type)
+        // regular line. loop over all columns and see whether this column
+        // must be distributed. note that as we proceed to distribute cols,
+        // the loop over cols may get longer.
+        //
+        // don't try to be clever here as in the algorithm for the
+        // DynamicSparsityPattern, as that would be much more
+        // complicated here. after all, we know that dynamic patterns
+        // are inefficient anyway...
+        {
+
+          // to loop over all entries in this row, we have to loop over all
+          // blocks in this blockrow and the corresponding row therein
+          for (size_type block_col=0; block_col<n_blocks; ++block_col)
+            {
+              const DynamicSparsityPattern &
+              block_sparsity = sparsity.block(block_row, block_col);
+
+              for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
+                {
+                  const size_type global_col
+                    = index_mapping.local_to_global(block_col,
+                                                    block_sparsity.column_number(local_row,j));
+
+                  if (distribute[global_col] != numbers::invalid_size_type)
+                    // distribute entry at regular row @p{row} and
+                    // irregular column global_col
+                    {
+                      for (size_type q=0;
+                           q!=lines[distribute[global_col]]
+                           .entries.size(); ++q)
+                        sparsity.add (row,
+                                      lines[distribute[global_col]].entries[q].first);
+                    }
+                }
+            }
+        }
+      else
+        {
+          // row must be distributed. split the whole row into the chunks
+          // defined by the blocks
+          for (size_type block_col=0; block_col<n_blocks; ++block_col)
+            {
+              const DynamicSparsityPattern &
+              block_sparsity = sparsity.block(block_row,block_col);
+
+              for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
+                {
+                  const size_type global_col
+                    = index_mapping.local_to_global (block_col,
+                                                     block_sparsity.column_number(local_row,j));
+
+                  if (distribute[global_col] == numbers::invalid_size_type)
+                    // distribute entry at irregular row @p{row} and
+                    // regular column global_col.
+                    {
+                      for (size_type q=0;
+                           q!=lines[distribute[row]].entries.size(); ++q)
+                        sparsity.add (lines[distribute[row]].entries[q].first,
+                                      global_col);
+                    }
+                  else
+                    // distribute entry at irregular row @p{row} and
+                    // irregular column @p{global_col}
+                    {
+                      for (size_type p=0;
+                           p!=lines[distribute[row]].entries.size(); ++p)
+                        for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+                          sparsity.add (lines[distribute[row]].entries[p].first,
+                                        lines[distribute[global_col]].entries[q].first);
+                    }
+                }
+            }
+        }
+    }
+}
+
+
+
+bool AffineConstraints::is_identity_constrained (const size_type index) const
+{
+  if (is_constrained(index) == false)
+    return false;
+
+  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index)]];
+  Assert (p.index == index, ExcInternalError());
+
+  // return whether an entry for this line was found and whether it
+  // consists of exactly one entry with weight 1.0
+  return ((p.entries.size() == 1) &&
+          (p.entries[0].second == 1.0));
+}
+
+
+bool AffineConstraints::are_identity_constrained (const size_type index1,
+                                                  const size_type index2) const
+{
+  if (is_constrained(index1) == true)
+    {
+      const ConstraintLine &p = lines[lines_cache[calculate_line_index(index1)]];
+      Assert (p.index == index1, ExcInternalError());
+
+      // return whether the constraint for this line consists of
+      // exactly one entry with weight 1.0 that points to index2
+      return ((p.entries.size() == 1) &&
+              (p.entries[0].first == index2) &&
+              (p.entries[0].second == 1.0));
+    }
+  else if (is_constrained(index2) == true)
+    {
+      const ConstraintLine &p = lines[lines_cache[calculate_line_index(index2)]];
+      Assert (p.index == index2, ExcInternalError());
+
+      // return whether the constraint for this line consists of
+      // exactly one entry with weight 1.0 that points to index1
+      return ((p.entries.size() == 1) &&
+              (p.entries[0].first == index1) &&
+              (p.entries[0].second == 1.0));
+    }
+  else
+    return false;
+}
+
+
+
+AffineConstraints::size_type
+AffineConstraints::max_constraint_indirections () const
+{
+  size_type return_value = 0;
+  for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
+       i!=lines.end(); ++i)
+    // use a static_cast, since entries.size() returns std::size_t,
+    // which may differ from size_type (e.g. on AIX)
+    return_value = std::max(return_value,
+                            static_cast<size_type>(i->entries.size()));
+
+  return return_value;
+}
+
+
+
+bool AffineConstraints::has_inhomogeneities () const
+{
+  for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
+       i!=lines.end(); ++i)
+    if (i->inhomogeneity != 0.)
+      return true;
+
+  return false;
+}
+
+
+void AffineConstraints::print (std::ostream &out) const
+{
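+  // as an example of the format used: the constraint
+  // x_42 = 0.5*x_13 + 0.25 is printed as the two lines
+  // "    42 13:  0.5" and "    42: 0.25"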
+  for (size_type i=0; i!=lines.size(); ++i)
+    {
+      // output the list of constraints as pairs of dofs and their weights
+      if (lines[i].entries.size() > 0)
+        {
+          for (size_type j=0; j<lines[i].entries.size(); ++j)
+            out << "    " << lines[i].index
+                << " " << lines[i].entries[j].first
+                << ":  " << lines[i].entries[j].second << "\n";
+
+          // print out inhomogeneity.
+          if (lines[i].inhomogeneity != 0)
+            out << "    " << lines[i].index
+                << ": " << lines[i].inhomogeneity << "\n";
+        }
+      else
+        // but also output something if the constraint simply reads
+        // x[13]=0, i.e. where the right hand side is not a linear
+        // combination of other dofs
+        {
+          if (lines[i].inhomogeneity != 0)
+            out << "    " << lines[i].index
+                << " = " << lines[i].inhomogeneity
+                << "\n";
+          else
+            out << "    " << lines[i].index << " = 0\n";
+        }
+    }
+
+  AssertThrow (out, ExcIO());
+}
+
+
+
+void
+AffineConstraints::write_dot (std::ostream &out) const
+{
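+  // the resulting file can be rendered with graphviz, e.g. via
+  //   dot -Tsvg constraints.dot -o constraints.svg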
+  out << "digraph constraints {"
+      << std::endl;
+  for (size_type i=0; i!=lines.size(); ++i)
+    {
+      // same concept as in the previous function
+      if (lines[i].entries.size() > 0)
+        for (size_type j=0; j<lines[i].entries.size(); ++j)
+          out << "  " << lines[i].index << "->" << lines[i].entries[j].first
+              << "; // weight: "
+              << lines[i].entries[j].second
+              << "\n";
+      else
+        out << "  " << lines[i].index << "\n";
+    }
+  out << "}" << std::endl;
+}
+
+
+
+std::size_t
+AffineConstraints::memory_consumption () const
+{
+  return (MemoryConsumption::memory_consumption (lines) +
+          MemoryConsumption::memory_consumption (lines_cache) +
+          MemoryConsumption::memory_consumption (sorted) +
+          MemoryConsumption::memory_consumption (local_lines));
+}
+
+
+
+void
+AffineConstraints::resolve_indices (std::vector<types::global_dof_index> &indices) const
+{
+  const unsigned int indices_size = indices.size();
+  const std::vector<std::pair<types::global_dof_index,double> > *line_ptr;
+  for (unsigned int i=0; i<indices_size; ++i)
+    {
+      line_ptr = get_constraint_entries(indices[i]);
+      // if the index is constrained, the indices of the constraint
+      // entries are appended to the indices vector
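+      // (e.g., starting from indices = {3, 7} with x7 constrained to
+      // x1 and x4, the vector grows to {3, 7, 1, 4}; the sort/unique
+      // step below then removes any duplicates)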
+      if (line_ptr!=nullptr)
+        {
+          const unsigned int line_size = line_ptr->size();
+          for (unsigned int j=0; j<line_size; ++j)
+            indices.push_back((*line_ptr)[j].first);
+        }
+    }
+
+  // keep only the unique elements
+  std::sort(indices.begin(),indices.end());
+  std::vector<types::global_dof_index>::iterator it;
+  it = std::unique(indices.begin(),indices.end());
+  indices.resize(it-indices.begin());
+}
+
+
+
+// explicit instantiations
+//
+// define a list of functions for vectors and matrices, respectively, where
+// the vector/matrix can be replaced using a preprocessor variable
+// VectorType/MatrixType. note that we need a space between "VectorType" and
+// ">" to disambiguate ">>" when VectorType trails in an angle bracket
+
+// TODO: The way we define all the instantiations is probably not the very
+// best one. Try to find a better description.
+
+#define VECTOR_FUNCTIONS(VectorType) \
+  template void AffineConstraints::condense<VectorType >(const VectorType &uncondensed,\
+                                                        VectorType       &condensed) const;\
+  template void AffineConstraints::condense<VectorType >(VectorType &vec) const;\
+  template void AffineConstraints:: \
+  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type>            &, \
+                                           const std::vector<AffineConstraints::size_type> &, \
+                                           VectorType                                      &, \
+                                           const FullMatrix<VectorType::value_type>        &) const;\
+  template void AffineConstraints:: \
+  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type>            &, \
+                                           const std::vector<AffineConstraints::size_type> &, \
+                                           const std::vector<AffineConstraints::size_type> &, \
+                                           VectorType                                      &, \
+                                           const FullMatrix<VectorType::value_type>        &, \
+                                           bool) const
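+
+// for illustration, an invocation such as
+//   VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
+// (used below) instantiates the two condense() overloads and the two
+// distribute_local_to_global() overloads for that vector type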
+
+#define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
+  template void AffineConstraints:: \
+  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type>            &, \
+                                           const std::vector<AffineConstraints::size_type> &, \
+                                           VectorType                                      &, \
+                                           const FullMatrix<VectorType::value_type>        &) const;\
+  template void AffineConstraints:: \
+  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type>            &, \
+                                           const std::vector<AffineConstraints::size_type> &, \
+                                           const std::vector<AffineConstraints::size_type> &, \
+                                           VectorType                                      &, \
+                                           const FullMatrix<VectorType::value_type>        &, \
+                                           bool) const
+
+#ifdef DEAL_II_WITH_PETSC
+VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
+VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::Vector);
+PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::BlockVector);
+#endif
+
+#define MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
+  template void AffineConstraints:: \
+  distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<MatrixType::value_type>        &, \
+                                                      const Vector<VectorType::value_type>            &, \
+                                                      const std::vector<AffineConstraints::size_type> &, \
+                                                      MatrixType                                      &, \
+                                                      VectorType                                      &, \
+                                                      bool                                             , \
+                                                      std::integral_constant<bool, false>) const
+#define MATRIX_FUNCTIONS(MatrixType,VectorScalar) \
+  template void AffineConstraints:: \
+  distribute_local_to_global<MatrixType,Vector<VectorScalar> > (const FullMatrix<MatrixType::value_type> &, \
+      const Vector<VectorScalar>                                                                         &, \
+      const std::vector<AffineConstraints::size_type>                                                    &, \
+      MatrixType                                                                                         &, \
+      Vector<VectorScalar>                                                                               &, \
+      bool                                                                                                , \
+      std::integral_constant<bool, false>) const
+
+#define BLOCK_MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType)   \
+  template void AffineConstraints:: \
+  distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<MatrixType::value_type>        &, \
+                                                      const Vector<VectorType::value_type>            &, \
+                                                      const std::vector<AffineConstraints::size_type> &, \
+                                                      MatrixType                                      &, \
+                                                      VectorType                                      &, \
+                                                      bool                                             , \
+                                                      std::integral_constant<bool, true>) const
+#define BLOCK_MATRIX_FUNCTIONS(MatrixType)      \
+  template void AffineConstraints:: \
+  distribute_local_to_global<MatrixType,Vector<MatrixType::value_type> > (const FullMatrix<MatrixType::value_type> &, \
+      const Vector<MatrixType::value_type>                                                                         &, \
+      const std::vector<AffineConstraints::size_type>                                                              &, \
+      MatrixType                                                                                                   &, \
+      Vector<MatrixType::value_type>                                                                               &, \
+      bool                                                                                                          , \
+      std::integral_constant<bool, true>) const
+
+MATRIX_FUNCTIONS(FullMatrix<double>,double);
+MATRIX_FUNCTIONS(FullMatrix<float>,float);
+MATRIX_FUNCTIONS(FullMatrix<double>,std::complex<double>);
+MATRIX_FUNCTIONS(FullMatrix<std::complex<double> >,std::complex<double>);
+
+MATRIX_FUNCTIONS(SparseMatrix<double>,double);
+MATRIX_FUNCTIONS(SparseMatrix<float>,float);
+MATRIX_FUNCTIONS(SparseMatrix<double>,std::complex<double>);
+MATRIX_FUNCTIONS(SparseMatrix<float>,std::complex<float>);
+MATRIX_FUNCTIONS(SparseMatrix<std::complex<double> >,std::complex<double>);
+MATRIX_FUNCTIONS(SparseMatrix<std::complex<float> >,std::complex<float>);
+
+MATRIX_FUNCTIONS(SparseMatrixEZ<double>,double);
+MATRIX_FUNCTIONS(SparseMatrixEZ<float>,float);
+MATRIX_FUNCTIONS(ChunkSparseMatrix<double>,double);
+MATRIX_FUNCTIONS(ChunkSparseMatrix<float>,float);
+
+
+BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
+BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
+BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<double>, BlockVector<double>);
+BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<float>,  BlockVector<float>);
+
+// BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrixEZ<double>);
+// BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrixEZ<float>,  Vector<float>);
+
+#ifdef DEAL_II_WITH_PETSC
+MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix,PetscScalar);
+MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix,PetscScalar);
+BLOCK_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
+MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::SparseMatrix, PETScWrappers::MPI::Vector);
+MATRIX_VECTOR_FUNCTIONS(PETScWrappers::SparseMatrix, PETScWrappers::MPI::Vector);
+BLOCK_MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix,PETScWrappers::MPI::BlockVector);
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix,double);
+BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
+MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector);
+BLOCK_MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix, TrilinosWrappers::MPI::BlockVector);
+#endif
+
+
+#define SPARSITY_FUNCTIONS(SparsityPatternType)                                       \
+  template void AffineConstraints::add_entries_local_to_global<SparsityPatternType> ( \
+      const std::vector<AffineConstraints::size_type> &,                              \
+      SparsityPatternType &,                                                          \
+      const bool,                                                                     \
+      const Table<2,bool> &,                                                          \
+      std::integral_constant<bool, false>) const;                                     \
+  template void AffineConstraints::add_entries_local_to_global<SparsityPatternType> ( \
+      const std::vector<AffineConstraints::size_type> &,                              \
+      const std::vector<AffineConstraints::size_type> &,                              \
+      SparsityPatternType &,                                                          \
+      const bool,                                                                     \
+      const Table<2,bool> &) const
+
+#define BLOCK_SPARSITY_FUNCTIONS(SparsityPatternType)                                 \
+  template void AffineConstraints::add_entries_local_to_global<SparsityPatternType> ( \
+      const std::vector<AffineConstraints::size_type> &,                              \
+      SparsityPatternType &,                                                          \
+      const bool,                                                                     \
+      const Table<2,bool> &,                                                          \
+      std::integral_constant<bool, true>) const;                                      \
+  template void AffineConstraints::add_entries_local_to_global<SparsityPatternType> ( \
+      const std::vector<AffineConstraints::size_type> &,                              \
+      const std::vector<AffineConstraints::size_type> &,                              \
+      SparsityPatternType &,                                                          \
+      const bool,                                                                     \
+      const Table<2,bool> &) const
+
+SPARSITY_FUNCTIONS(SparsityPattern);
+SPARSITY_FUNCTIONS(DynamicSparsityPattern);
+BLOCK_SPARSITY_FUNCTIONS(BlockSparsityPattern);
+BLOCK_SPARSITY_FUNCTIONS(BlockDynamicSparsityPattern);
+
+#ifdef DEAL_II_WITH_TRILINOS
+SPARSITY_FUNCTIONS(TrilinosWrappers::SparsityPattern);
+BLOCK_SPARSITY_FUNCTIONS(TrilinosWrappers::BlockSparsityPattern);
+#endif
+
+
+#define ONLY_MATRIX_FUNCTIONS(MatrixType)                                      \
+  template void AffineConstraints::distribute_local_to_global<MatrixType > (   \
+      const FullMatrix<MatrixType::value_type>        &,                       \
+      const std::vector<AffineConstraints::size_type> &,                       \
+      const std::vector<AffineConstraints::size_type> &,                       \
+      MatrixType                      &) const;                                \
+  template void AffineConstraints::distribute_local_to_global<MatrixType > (   \
+      const FullMatrix<MatrixType::value_type>        &,                       \
+      const std::vector<AffineConstraints::size_type> &,                       \
+      const AffineConstraints &,                                               \
+      const std::vector<AffineConstraints::size_type> &,                       \
+      MatrixType                      &) const
+
+ONLY_MATRIX_FUNCTIONS(FullMatrix<float>);
+ONLY_MATRIX_FUNCTIONS(FullMatrix<double>);
+ONLY_MATRIX_FUNCTIONS(SparseMatrix<float>);
+ONLY_MATRIX_FUNCTIONS(SparseMatrix<double>);
+ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<float> >);
+ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<double> >);
+ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
+ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
+
+#ifdef DEAL_II_WITH_TRILINOS
+ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
+ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+ONLY_MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
+ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
+ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
+#endif
+
+#include "affine_constraints.inst"
+
+// allocate scratch data. Cannot use the generic template instantiation
+// because we need to provide an initializer object of type
+// internals::AffineConstraintsData<MatrixScalar,VectorScalar> that can
+// be passed to the constructor of scratch_data (it won't allow one to
+// be constructed in place).
+namespace internals
+{
+#define SCRATCH_INITIALIZER(MatrixScalar,VectorScalar,Name)                  \
+  AffineConstraintsData<MatrixScalar,VectorScalar>::ScratchData scratch_data_initializer_##Name; \
+  template <> Threads::ThreadLocalStorage<AffineConstraintsData<MatrixScalar,VectorScalar>::ScratchData> \
+  AffineConstraintsData<MatrixScalar,VectorScalar>::scratch_data(scratch_data_initializer_##Name)
+
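+  // for example, the first invocation below,
+  // SCRATCH_INITIALIZER(double,double,dd), defines the global object
+  // scratch_data_initializer_dd and uses it to construct the
+  // thread-local scratch storage of the <double,double> instantiation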
+  SCRATCH_INITIALIZER(double,double,dd);
+  SCRATCH_INITIALIZER(float,float,ff);
+  SCRATCH_INITIALIZER(std::complex<double>,std::complex<double>,zz);
+  SCRATCH_INITIALIZER(std::complex<float>,std::complex<float>,cc);
+  SCRATCH_INITIALIZER(double,std::complex<double>,dz);
+  SCRATCH_INITIALIZER(float,std::complex<float>,fc);
+#undef SCRATCH_INITIALIZER
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/lac/affine_constraints.inst.in b/source/lac/affine_constraints.inst.in
new file mode 100644 (file)
index 0000000..a214045
--- /dev/null
@@ -0,0 +1,114 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2013 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
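+// the for(...) blocks below are expanded by deal.II's
+// expand_instantiations tool, which substitutes each placeholder
+// (S, T, V, Vec, ...) with every element of the named list; e.g.,
+// REAL_SCALARS typically expands to double and float
+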
+for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
+{
+    template void AffineConstraints::condense<T<S> >(const T<S> &, T<S> &) const;
+    template void AffineConstraints::condense<T<S> >(T<S> &vec) const;
+    template void AffineConstraints::distribute_local_to_global<T<S> > (
+        const Vector<S>&, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<S>&) const;
+    template void AffineConstraints::distribute_local_to_global<T<S> > (
+        const Vector<S>&, const std::vector<types::global_dof_index> &, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<S>&, bool) const;
+    template void AffineConstraints::set_zero<T<S> >(T<S> &) const;
+}
+
+
+for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
+{
+    template void AffineConstraints::condense<LinearAlgebra::distributed::T<S> >(const LinearAlgebra::distributed::T<S> &, LinearAlgebra::distributed::T<S> &) const;
+    template void AffineConstraints::condense<LinearAlgebra::distributed::T<S> >(LinearAlgebra::distributed::T<S> &vec) const;
+
+    template
+    void
+    AffineConstraints::distribute_local_to_global<LinearAlgebra::distributed::T<S> >
+    (const Vector<S>&,
+     const std::vector<types::global_dof_index> &,
+     LinearAlgebra::distributed::T<S> &,
+     const FullMatrix<S>&) const;
+
+    template
+    void
+    AffineConstraints::distribute_local_to_global<LinearAlgebra::distributed::T<S> >
+    (const Vector<S>&,
+     const std::vector<types::global_dof_index> &,
+     const std::vector<types::global_dof_index> &,
+     LinearAlgebra::distributed::T<S> &,
+     const FullMatrix<S>&,
+     bool) const;
+
+    template
+    void
+    AffineConstraints::distribute_local_to_global<DiagonalMatrix<LinearAlgebra::distributed::T<S> > >
+    (const FullMatrix<S> &,
+     const std::vector< size_type > &,
+     DiagonalMatrix<LinearAlgebra::distributed::T<S> > &) const;
+
+    template
+    void
+    AffineConstraints::distribute_local_to_global<DiagonalMatrix<LinearAlgebra::distributed::T<S> >, LinearAlgebra::distributed::T<S> >
+    (const FullMatrix<S> &,
+     const Vector<S>&,
+     const std::vector< size_type > &,
+     DiagonalMatrix<LinearAlgebra::distributed::T<S> > &,
+     LinearAlgebra::distributed::T<S>&,
+     bool,
+     std::integral_constant<bool, false>) const;
+
+    template
+    void
+    AffineConstraints::distribute_local_to_global<DiagonalMatrix<LinearAlgebra::distributed::T<S> >, T<S> >
+    (const FullMatrix<S> &,
+     const Vector<S>&,
+     const std::vector< size_type > &,
+     DiagonalMatrix<LinearAlgebra::distributed::T<S> > &,
+     T<S>&,
+     bool,
+     std::integral_constant<bool, false>) const;
+
+    template
+    void
+    AffineConstraints::set_zero<LinearAlgebra::distributed::T<S> >(LinearAlgebra::distributed::T<S> &) const;
+}
+
+
+for (V: EXTERNAL_PARALLEL_VECTORS)
+{
+    template void AffineConstraints::set_zero<V >(V&) const;
+}
+
+
+for (S : REAL_SCALARS)
+{
+    template void AffineConstraints::condense<S>(SparseMatrix<S>&) const;
+    template void AffineConstraints::condense<S>(BlockSparseMatrix<S>&) const;
+}
+
+
+for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
+{
+    template void AffineConstraints::condense<S1,Vector<S2> >(SparseMatrix<S1>&, Vector<S2>&) const;
+    template void AffineConstraints::condense<S1,BlockVector<S2> >(BlockSparseMatrix<S1>&, BlockVector<S2>&) const;
+}
+
+for (S1 : COMPLEX_SCALARS)
+{
+    template void AffineConstraints::condense<S1,Vector<S1> >(SparseMatrix<S1>&, Vector<S1>&) const;
+}
+
+
+for (Vec : VECTOR_TYPES)
+{
+    template void AffineConstraints::distribute<Vec>(Vec &) const;
+}
