From: Matthias Maier Date: Thu, 24 May 2018 17:09:53 +0000 (-0500) Subject: lac: Add AffineConstraints class X-Git-Tag: v9.1.0-rc1~1067^2~39 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4c908dbb2f72b890b17b4034fecfb31630fead17;p=dealii.git lac: Add AffineConstraints class For now this is a verbatim copy of the ConstraintMatrix class. The idea is to templatify this class and after that switch the code base to it. --- diff --git a/include/deal.II/lac/affine_constraints.h b/include/deal.II/lac/affine_constraints.h new file mode 100644 index 0000000000..7fa45a9a54 --- /dev/null +++ b/include/deal.II/lac/affine_constraints.h @@ -0,0 +1,1978 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 1998 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#ifndef dealii_affine_constraints_h +#define dealii_affine_constraints_h + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + + +DEAL_II_NAMESPACE_OPEN + +template class Table; +template class FullMatrix; +class SparsityPattern; +class DynamicSparsityPattern; +class BlockSparsityPattern; +class BlockDynamicSparsityPattern; +template class SparseMatrix; +template class BlockSparseMatrix; + +namespace internals +{ + class GlobalRowsFromLocal; +} + + +// TODO[WB]: We should have a function of the kind +// AffineConstraints::add_constraint (const size_type constrained_dof, +// const std::vector > &entries, +// const double inhomogeneity = 0); +// rather than building up constraints piecemeal through add_line/add_entry +// etc. This would also eliminate the possibility of accidentally changing +// existing constraints into something pointless, see the discussion on the +// mailing list on "Tiny bug in interpolate_boundary_values" in Sept. 2010. + +/** + * This class implements dealing with linear (possibly inhomogeneous) + * constraints on degrees of freedom. The concept and origin of such + * constraints is extensively described in the + * @ref constraints + * module. The class is meant to deal with a limited number of constraints + * relative to the total number of degrees of freedom, for example a few per + * cent up to maybe 30 per cent; and with a linear combination of M + * other degrees of freedom where M is also relatively small (no larger + * than at most around the average number of entries per row of a linear + * system). It is not meant to describe full rank linear systems. + * + * The algorithms used in the implementation of this class are described in + * some detail in the + * @ref hp_paper "hp paper". + * There is also a significant amount of documentation on how to use this + * class in the + * @ref constraints + * module. + * + * + *
+ * <h3>Description of constraints</h3>
+ * + * Each "line" in objects of this class corresponds to one constrained degree + * of freedom, with the number of the line being i, entered by using + * add_line() or add_lines(). The entries in this line are pairs of the form + * (j,aij), which are added by add_entry() or + * add_entries(). The organization is essentially a SparsityPattern, but with + * only a few lines containing nonzero elements, and therefore no data wasted + * on the others. For each line, which has been added by the mechanism above, + * an elimination of the constrained degree of freedom of the form + * @f[ + * x_i = \sum_j a_{ij} x_j + b_i + * @f] + * is performed, where bi is optional and set by + * set_inhomogeneity(). Thus, if a constraint is formulated for instance as a + * zero mean value of several degrees of freedom, one of the degrees has to be + * chosen to be eliminated. + * + * Note that the constraints are linear in the xi, and that + * there might be a constant (non-homogeneous) term in the constraint. This is + * exactly the form we need for hanging node constraints, where we need to + * constrain one degree of freedom in terms of others. There are other + * conditions of this form possible, for example for implementing mean value + * conditions as is done in the step-11 tutorial program. The name of the + * class stems from the fact that these constraints can be represented in + * matrix form as X x = b, and this object then describes + * the matrix X and the vector b. The most frequent way to + * create/fill objects of this type is using the + * DoFTools::make_hanging_node_constraints() function. The use of these + * objects is first explained in step-6. + * + * Objects of the present type are organized in lines (rows), but only those + * lines are stored where constraints are present. New constraints are added + * by adding new lines using the add_line() function, and then populating it + * using the add_entry() function to a given line, or add_entries() to add + * more than one entry at a time. The right hand side element, if nonzero, can + * be set using the set_inhomogeneity() function. After all constraints have + * been added, you need to call close(), which compresses the storage format + * and sorts the entries. + * + * @note Many of the algorithms this class implements are discussed in the + * @ref hp_paper. + * The algorithms are also related to those shown in M. S. Shephard: Linear + * multipoint constraints applied via transformation as part of a direct + * stiffness assembly process. Int. J. Numer. Meth. Engrg., vol. 20 (1984), + * pp. 2107-2112., with the difference that the algorithms shown there + * completely eliminated constrained degrees of freedom, whereas we usually + * keep them as part of the linear system. + * + * @ingroup dofs + * @ingroup constraints + * @author Wolfgang Bangerth, Martin Kronbichler, 1998, 2004, 2008, 2009 + */ +class AffineConstraints : public Subscriptor +{ +public: + /** + * Declare the type for container size. + */ + typedef types::global_dof_index size_type; + + /** + * An enum that describes what should happen if the two AffineConstraints + * objects involved in a call to the merge() function happen to have + * constraints on the same degrees of freedom. + */ + enum MergeConflictBehavior + { + /** + * Throw an exception if the two objects concerned have conflicting + * constraints on the same degree of freedom. 
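+ *
+ * As an illustrative sketch (the objects cm1 and cm2 are
+ * hypothetical), the following merge would throw, since both
+ * objects constrain degree of freedom 3:
+ * @code
+ * AffineConstraints cm1, cm2;
+ * cm1.add_line (3);          // x_3 = 0
+ * cm2.add_line (3);
+ * cm2.add_entry (3, 9, 1.0); // x_3 = x_9
+ * cm1.merge (cm2);           // throws: DoF 3 is constrained in both objects
+ * @endcode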
+ */ + no_conflicts_allowed, + + /** + * In an operation cm1.merge(cm2), if cm1 and + * cm2 have constraints on the same degree of freedom, take + * the one from cm1. + */ + left_object_wins, + + /** + * In an operation cm1.merge(cm2), if cm1 and + * cm2 have constraints on the same degree of freedom, take + * the one from cm2. + */ + right_object_wins + }; + + /** + * Constructor. The supplied IndexSet defines which indices might be + * constrained inside this AffineConstraints container. In a calculation + * with a DoFHandler object based on parallel::distributed::Triangulation + * or parallel::shared::Triangulation, one should use the set of locally + * relevant dofs (see @ref GlossLocallyRelevantDof). + * + * The given IndexSet allows the AffineConstraints container to save + * memory by just not caring about degrees of freedom that are not of + * importance to the current processor. Alternatively, if no such + * IndexSet is provided, internal data structures for all possible + * indices will be created, leading to memory consumption on every + * processor that is proportional to the overall size of the + * problem, not just proportional to the size of the portion of the + * overall problem that is handled by the current processor. + */ + explicit AffineConstraints (const IndexSet &local_constraints = IndexSet()); + + /** + * Copy constructor + */ + explicit AffineConstraints (const AffineConstraints &affine_constraints); + + /** + * Move constructor + */ + AffineConstraints (AffineConstraints &&affine_constraints) = default; + + /** + * Copy operator. Like for many other large objects, this operator + * is deleted to avoid its inadvertent use in places such as + * accidentally declaring a @p AffineConstraints object as a + * function argument by value, rather than by reference. + * + * However, you can use the copy_from() function to explicitly + * copy AffineConstraints objects. + */ + AffineConstraints &operator= (const AffineConstraints &) = delete; + + /** + * Move assignment operator + */ + AffineConstraints &operator= (AffineConstraints &&affine_constraints) = default; + + /** + * Copy the given object to the current one. + * + * This function exists because @p operator=() is explicitly + * disabled. + */ + void copy_from (const AffineConstraints &other); + + /** + * clear() the AffineConstraints object and supply an IndexSet with lines + * that may be constrained. This function is only relevant in the + * distributed case to supply a different IndexSet. Otherwise this routine + * is equivalent to calling clear(). See the constructor for details. + */ + void reinit (const IndexSet &local_constraints = IndexSet()); + + /** + * Determines if we can store a constraint for the given @p line_index. This + * routine only matters in the distributed case and checks if the IndexSet + * allows storage of this line. Always returns true if not in the + * distributed case. + */ + bool can_store_line (const size_type line_index) const; + + /** + * Return the index set describing locally relevant lines if any are + * present. Note that if no local lines were given, this represents an empty + * IndexSet, whereas otherwise it contains the global problem size and the + * local range. + */ + const IndexSet &get_local_lines() const; + + /** + * This function copies the content of @p constraints_in with DoFs that are + * element of the IndexSet @p filter. Elements that are not present in the + * IndexSet are ignored. 
All DoFs will be transformed to local index space + * of the filter, both the constrained DoFs and the other DoFs these entries + * are constrained to. The local index space of the filter is a contiguous + * numbering of all (global) DoFs that are elements in the filter. + * + * If, for example, the filter represents the range [10,20), and + * the constraint matrix @p constraints_in includes the global indices + * {7,13,14}, the indices {3,4} are added to the calling + * constraint matrix (since 13 and 14 are elements in the filter and element + * 13 is the fourth element in the index, and 14 is the fifth). + * + * This function provides an easy way to create a AffineConstraints for + * certain vector components in a vector-valued problem from a full + * AffineConstraints, i.e. extracting a diagonal subblock from a larger + * AffineConstraints. The block is specified by the IndexSet argument. + */ + void add_selected_constraints (const AffineConstraints &constraints_in, + const IndexSet &filter); + + /** + * @name Adding constraints + * @{ + */ + + /** + * Add a new line to the matrix. If the line already exists, then the + * function simply returns without doing anything. + */ + void add_line (const size_type line); + + /** + * Call the first add_line() function for every index i for + * which lines[i] is true. + * + * This function essentially exists to allow adding several constraints of + * the form xi=0 all at once, where the set of indices + * i for which these constraints should be added are given by the + * argument of this function. On the other hand, just as if the single- + * argument add_line() function were called repeatedly, the constraints can + * later be modified to include linear dependencies using the add_entry() + * function as well as inhomogeneities using set_inhomogeneity(). + */ + void add_lines (const std::vector &lines); + + /** + * Call the first add_line() function for every index i that + * appears in the argument. + * + * This function essentially exists to allow adding several constraints of + * the form xi=0 all at once, where the set of indices + * i for which these constraints should be added are given by the + * argument of this function. On the other hand, just as if the single- + * argument add_line() function were called repeatedly, the constraints can + * later be modified to include linear dependencies using the add_entry() + * function as well as inhomogeneities using set_inhomogeneity(). + */ + void add_lines (const std::set &lines); + + /** + * Call the first add_line() function for every index i that + * appears in the argument. + * + * This function essentially exists to allow adding several constraints of + * the form xi=0 all at once, where the set of indices + * i for which these constraints should be added are given by the + * argument of this function. On the other hand, just as if the single- + * argument add_line() function were called repeatedly, the constraints can + * later be modified to include linear dependencies using the add_entry() + * function as well as inhomogeneities using set_inhomogeneity(). + */ + void add_lines (const IndexSet &lines); + + /** + * Add an entry to a given line. The list of lines is searched from the back + * to the front, so clever programming would add a new line (which is pushed + * to the back) and immediately afterwards fill the entries of that line. + * This way, no expensive searching is needed. 
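+ *
+ * As a minimal sketch (on an object called constraints here), the
+ * constraint $x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75$ used in the
+ * documentation of print() below would be entered as
+ * @code
+ * constraints.add_line (42);
+ * constraints.add_entry (42, 2, 0.5);
+ * constraints.add_entry (42, 14, 0.25);
+ * constraints.set_inhomogeneity (42, 2.75);
+ * constraints.close ();
+ * @endcode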
+ * + * If an entry with the same indices as the one this function call denotes + * already exists, then this function simply returns provided that the value + * of the entry is the same. Thus, it does no harm to enter a constraint + * twice. + */ + void add_entry (const size_type line, + const size_type column, + const double value); + + /** + * Add a whole series of entries, denoted by pairs of column indices and + * values, to a line of constraints. This function is equivalent to calling + * the preceding function several times, but is faster. + */ + void add_entries (const size_type line, + const std::vector > &col_val_pairs); + + /** + * Set an inhomogeneity to the constraint line i, according to the + * discussion in the general class description. + * + * @note the line needs to be added with one of the add_line() calls first. + */ + void set_inhomogeneity (const size_type line, + const double value); + + /** + * Close the filling of entries. Since the lines of a matrix of this type + * are usually filled in an arbitrary order and since we do not want to use + * associative constrainers to store the lines, we need to sort the lines + * and within the lines the columns before usage of the matrix. This is done + * through this function. + * + * Also, zero entries are discarded, since they are not needed. + * + * After closing, no more entries are accepted. If the object was already + * closed, then this function returns immediately. + * + * This function also resolves chains of constraints. For example, degree of + * freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ + * while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2} + * + \frac{u_4}{2}$. Then, the resolution will be that $u_{13} = + * \frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that + * cycles in this graph of constraints are not allowed, i.e. for example + * $u_4$ may not be constrained, directly or indirectly, to $u_{13}$ again. + */ + void close (); + + /** + * Merge the constraints represented by the object given as argument into + * the constraints represented by this object. Both objects may or may not + * be closed (by having their function close() called before). If this + * object was closed before, then it will be closed afterwards as well. + * Note, however, that if the other argument is closed, then merging may be + * significantly faster. + * + * Using the default value of the second arguments, the constraints in each + * of the two objects (the old one represented by this object and the + * argument) may not refer to the same degree of freedom, i.e. a degree of + * freedom that is constrained in one object may not be constrained in the + * second. If this is nevertheless the case, an exception is thrown. + * However, this behavior can be changed by providing a different value for + * the second argument. + * + * By default, merging two AffineConstraints objects that are initialized + * with different IndexSet objects is not allowed. + * This behavior can be altered by setting @p allow_different_local_lines + * appropriately. + * + * Merging a AffineConstraints that is initialized with an IndexSet + * and one that is not initialized with an IndexSet is not yet implemented. 
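+ *
+ * As a sketch (using hypothetical objects), one may combine hanging node
+ * constraints with boundary value constraints and let the latter win
+ * whenever both objects constrain the same degree of freedom:
+ * @code
+ * hanging_node_constraints.merge (boundary_constraints,
+ *                                 AffineConstraints::right_object_wins);
+ * @endcode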
+ */ + void merge (const AffineConstraints &other_constraints, + const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed, + const bool allow_different_local_lines = false); + + /** + * Shift all entries of this matrix down @p offset rows and over @p offset + * columns. If this object is initialized with an IndexSet, local_lines are + * shifted as well. + * + * This function is useful if you are building block matrices, where all + * blocks are built by the same DoFHandler object, i.e. the matrix size is + * larger than the number of degrees of freedom. Since several matrix rows + * and columns correspond to the same degrees of freedom, you'd generate + * several constraint objects, then shift them, and finally merge() them + * together again. + */ + void shift (const size_type offset); + + /** + * Clear all entries of this matrix. Reset the flag determining whether new + * entries are accepted or not. + * + * This function may be called also on objects which are empty or already + * cleared. + */ + void clear (); + + /** + * @} + */ + + + /** + * @name Querying constraints + * @{ + */ + + /** + * Return number of constraints stored in this matrix. + */ + size_type n_constraints () const; + + /** + * Return whether the degree of freedom with number @p index is a + * constrained one. + * + * Note that if close() was called before, then this function is + * significantly faster, since then the constrained degrees of freedom are + * sorted and we can do a binary search, while before close() was called, we + * have to perform a linear search through all entries. + */ + bool is_constrained (const size_type index) const; + + /** + * Return whether the dof is constrained, and whether it is constrained to + * only one other degree of freedom with weight one. The function therefore + * returns whether the degree of freedom would simply be eliminated in favor + * of exactly one other degree of freedom. + * + * The function returns @p false if either the degree of freedom is not + * constrained at all, or if it is constrained to more than one other degree + * of freedom, or if it is constrained to only one degree of freedom but + * with a weight different from one. + */ + bool is_identity_constrained (const size_type index) const; + + /** + * Return whether the two given degrees of freedom are linked by an equality + * constraint that either constrains index1 to be so that + * index1=index2 or constrains index2 so that + * index2=index1. + */ + bool are_identity_constrained (const size_type index1, + const size_type index2) const; + + /** + * Return the maximum number of other dofs that one dof is constrained to. + * For example, in 2d a hanging node is constrained only to its two + * neighbors, so the returned value would be 2. However, for higher order + * elements and/or higher dimensions, or other types of constraints, this + * number is no more obvious. + * + * The name indicates that within the system matrix, references to a + * constrained node are indirected to the nodes it is constrained to. + */ + size_type max_constraint_indirections () const; + + /** + * Return true in case the dof is constrained and there is a non- + * trivial inhomogeneous values set to the dof. + */ + bool is_inhomogeneously_constrained (const size_type index) const; + + /** + * Return false if all constraints in the AffineConstraints are + * homogeneous ones, and true if there is at least one + * inhomogeneity. 
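+ *
+ * A sketch of how this query might be used (the system_matrix and
+ * system_rhs objects are hypothetical): if there is at least one
+ * inhomogeneity, the right hand side has to be condensed along with
+ * the matrix:
+ * @code
+ * if (constraints.has_inhomogeneities())
+ *   constraints.condense (system_matrix, system_rhs);
+ * else
+ *   constraints.condense (system_matrix);
+ * @endcode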
+ */ + bool has_inhomogeneities () const; + + /** + * Return a pointer to the vector of entries if a line is constrained, + * and a zero pointer in case the dof is not constrained. + */ + const std::vector > * + get_constraint_entries (const size_type line) const; + + /** + * Return the value of the inhomogeneity stored in the constrained dof @p + * line. Unconstrained dofs also return a zero value. + */ + double get_inhomogeneity (const size_type line) const; + + /** + * Print the constraints represented by the current object to the + * given stream. + * + * For each constraint of the form + * @f[ + * x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75 + * @f] + * this function will write a sequence of lines that look like this: + * @code + * 42 2 : 0.5 + * 42 14 : 0.25 + * 42 : 2.75 + * @endcode + * The last line is only shown if the inhomogeneity (here: 2.75) is + * nonzero. + * + * A block of lines such as the one above is repeated for each + * constrained degree of freedom. + */ + void print (std::ostream &out) const; + + /** + * Write the graph of constraints in 'dot' format. 'dot' is a program that + * can take a list of nodes and produce a graphical representation of the + * graph of constrained degrees of freedom and the degrees of freedom they + * are constrained to. + * + * The output of this function can be used as input to the 'dot' program + * that can convert the graph into a graphical representation in postscript, + * png, xfig, and a number of other formats. + * + * This function exists mostly for debugging purposes. + */ + void write_dot (std::ostream &) const; + + /** + * Determine an estimate for the memory consumption (in bytes) of this + * object. + */ + std::size_t memory_consumption () const; + + /** + * Add the constraint indices associated to the indices in the given vector. + * After a call to this function, the indices vector contains the initial + * elements and all the associated constrained indices. This function sorts + * the elements and suppresses duplicates. + */ + void resolve_indices(std::vector &indices) const; + + /** + * @} + */ + + /** + * @name Eliminating constraints from linear systems after their creation + * @{ + */ + + + /** + * Condense a sparsity pattern. The name of the function mimics the name of + * the function we use to condense linear systems, but it is a bit of a + * misnomer for the current context. This is because in the context of + * linear systems, we eliminate certain rows and columns of the linear + * system, i.e., we "reduce" or "condense" the linear system. On the other + * hand, in the current context, the functions does not remove nonzero + * entries from the sparsity pattern. Rather, it adds those nonzero entry + * locations to the sparsity pattern that will later be needed for the + * process of condensation of constrained degrees of freedom from a linear + * system. + * + * Since this function adds new nonzero entries to the sparsity pattern, the + * given sparsity pattern must not be compressed. The constraint matrix + * (i.e., the current object) must be closed. The sparsity pattern is + * compressed at the end of the function. + */ + void condense (SparsityPattern &sparsity) const; + + /** + * Same function as above, but condenses square block sparsity patterns. + */ + void condense (BlockSparsityPattern &sparsity) const; + + /** + * Same function as above, but condenses square compressed sparsity + * patterns. 
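+ *
+ * A minimal sketch of the usual order of operations, assuming a
+ * dof_handler object exists in the caller:
+ * @code
+ * DynamicSparsityPattern dsp (dof_handler.n_dofs());
+ * DoFTools::make_sparsity_pattern (dof_handler, dsp);
+ * constraints.condense (dsp);
+ * @endcode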
+ */ + void condense (DynamicSparsityPattern &sparsity) const; + + /** + * Same function as above, but condenses square compressed sparsity + * patterns. + */ + void condense (BlockDynamicSparsityPattern &sparsity) const; + + /** + * Condense a given matrix, i.e., eliminate the rows and columns of the + * matrix that correspond to constrained degrees of freedom. + * + * See the general documentation of this class for more detailed + * information. + */ + template + void condense (SparseMatrix &matrix) const; + + /** + * Same function as above, but condenses square block sparse matrices. + */ + template + void condense (BlockSparseMatrix &matrix) const; + + /** + * Condense the given vector in-place. The @p VectorType may be a + * Vector, Vector, BlockVector<...>, a PETSc or + * Trilinos vector wrapper class, or any other type having the same + * interface. Note that this function does not take any inhomogeneity into + * account and throws an exception in case there are any inhomogeneities. + * Use the function using both a matrix and vector for that case. + * + * @note This function does not work for MPI vectors. Use condense() with + * two vector arguments instead. + */ + template + void condense (VectorType &vec) const; + + /** + * The function copies and condenses values from @p vec_ghosted into @p + * output. In a serial code it is equivalent to calling condense (vec). If + * called in parallel, @p vec_ghosted is supposed to contain ghost elements + * while @p output should not. + */ + template + void condense (const VectorType &vec_ghosted, + VectorType &output) const; + + /** + * Condense a given matrix and a given vector by eliminating rows and + * columns of the linear system that correspond to constrained degrees of + * freedom. The sparsity pattern associated with the matrix needs to be + * condensed and compressed. This function is the appropriate choice for + * applying inhomogeneous constraints. + * + * The constraint matrix object must be closed to call this function. + * + * See the general documentation of this class for more detailed + * information. + */ + template + void condense (SparseMatrix &matrix, + VectorType &vector) const; + + /** + * Same function as above, but condenses square block sparse matrices and + * vectors. + */ + template + void condense (BlockSparseMatrix &matrix, + BlockVectorType &vector) const; + + /** + * Set the values of all constrained DoFs in a vector to zero. The @p + * VectorType may be a Vector, Vector, + * BlockVector<...>, a PETSc or Trilinos vector wrapper class, or + * any other type having the same interface. + */ + template + void set_zero (VectorType &vec) const; + + /** + * @} + */ + + /** + * @name Eliminating constraints from linear systems during their creation + * @{ + */ + + /** + * This function takes a vector of local contributions (@p local_vector) + * corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global vector. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as @p local_vector and @p + * local_dof_indices have the same number of elements, this function is + * happy with whatever it is given. + * + * In contrast to the similar function in the DoFAccessor class, this + * function also takes care of constraints, i.e. 
if one of the elements of + * @p local_dof_indices belongs to a constrained node, then rather than + * writing the corresponding element of @p local_vector into @p + * global_vector, the element is distributed to the entries in the global + * vector to which this particular degree of freedom is constrained. + * + * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. On the other hand, by + * consequence, the function does not only write into the entries enumerated + * by the @p local_dof_indices array, but also (possibly) others as + * necessary. + * + * Note that this function will apply all constraints as if they were + * homogeneous. For correctly setting inhomogeneous constraints, use the + * similar function with a matrix argument or the function with both matrix + * and vector arguments. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global vector allows for + * simultaneous access and the access is not to rows with the same global + * index at the same time. This needs to be made sure from the caller's + * site. There is no locking mechanism inside this method to prevent data + * races. + * + * @param[in] local_vector Vector of local contributions. + * @param[in] local_dof_indices Local degrees of freedom indices + * corresponding to the vector of local contributions. + * @param[out] global_vector The global vector to which all local + * contributions will be added. + */ + template + void + distribute_local_to_global (const InVector &local_vector, + const std::vector &local_dof_indices, + OutVector &global_vector) const; + + /** + * This function takes a vector of local contributions (@p local_vector) + * corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global vector. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as @p local_vector and @p + * local_dof_indices have the same number of elements, this function is + * happy with whatever it is given. + * + * In contrast to the similar function in the DoFAccessor class, this + * function also takes care of constraints, i.e. if one of the elements of + * @p local_dof_indices belongs to a constrained node, then rather than + * writing the corresponding element of @p local_vector into @p + * global_vector, the element is distributed to the entries in the global + * vector to which this particular degree of freedom is constrained. + * + * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. On the other hand, by + * consequence, the function does not only write into the entries enumerated + * by the @p local_dof_indices array, but also (possibly) others as + * necessary. This includes writing into diagonal elements of the matrix if + * the corresponding degree of freedom is constrained. + * + * The fourth argument local_matrix is intended to be used in case + * one wants to apply inhomogeneous constraints on the vector only. 
Such a + * situation could be where one wants to assemble of a right hand side + * vector on a problem with inhomogeneous constraints, but the global matrix + * has been assembled previously. A typical example of this is a time + * stepping algorithm where the stiffness matrix is assembled once, and the + * right hand side updated every time step. Note that, however, the entries + * in the columns of the local matrix have to be exactly the same as those + * that have been written into the global matrix. Otherwise, this function + * will not be able to correctly handle inhomogeneities. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global vector allows for + * simultaneous access and the access is not to rows with the same global + * index at the same time. This needs to be made sure from the caller's + * site. There is no locking mechanism inside this method to prevent data + * races. + */ + template + void + distribute_local_to_global (const Vector &local_vector, + const std::vector &local_dof_indices, + VectorType &global_vector, + const FullMatrix &local_matrix) const; + + /** + * Same as the previous function, except that it uses two (possibly) different + * index sets to correctly handle inhomogeneities when the local matrix is + * computed from a combination of two neighboring elements, for example for an + * edge integral term in DG. Note that in the case that these two elements have + * different polynomial degree, the local matrix is rectangular. + * + * local_dof_indices_row is the set of row indices and + * local_dof_indices_col is the set of column indices of the local matrix. + * diagonal=false says whether the two index sets are equal or not. + * + * If both index sets are equal, diagonal must be set to true or we + * simply use the previous function. If both index sets are different (diagonal=false) + * the global_vector is modified to handle inhomogeneities but no + * entries from local_vector are added. Note that the edge integrals for inner + * edged for DG do not contribute any values to the right hand side. + */ + template + void + distribute_local_to_global (const Vector &local_vector, + const std::vector &local_dof_indices_row, + const std::vector &local_dof_indices_col, + VectorType &global_vector, + const FullMatrix &local_matrix, + bool diagonal = false) const; + + /** + * Enter a single value into a result vector, obeying constraints. + */ + template + void + distribute_local_to_global (const size_type index, + const double value, + VectorType &global_vector) const; + + /** + * This function takes a pointer to a vector of local contributions (@p + * local_vector) corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global vector. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as the entries in @p + * local_dof_indices indicate reasonable global vector entries, this + * function is happy with whatever it is given. + * + * If one of the elements of @p local_dof_indices belongs to a constrained + * node, then rather than writing the corresponding element of @p + * local_vector into @p global_vector, the element is distributed to the + * entries in the global vector to which this particular degree of freedom + * is constrained. 
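+ *
+ * A minimal sketch (the two local vectors shown and the global_rhs
+ * object are hypothetical):
+ * @code
+ * std::vector<double> local_values = {1., 2., 3., 4.};
+ * std::vector<types::global_dof_index> indices = {3, 12, 21, 42};
+ * constraints.distribute_local_to_global (local_values.begin(),
+ *                                         local_values.end(),
+ *                                         indices.begin(),
+ *                                         global_rhs);
+ * @endcode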
+ * + * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. Note that this function + * completely ignores inhomogeneous constraints. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global vector allows for + * simultaneous access and the access is not to rows with the same global + * index at the same time. This needs to be made sure from the caller's + * site. There is no locking mechanism inside this method to prevent data + * races. + */ + template + void + distribute_local_to_global (ForwardIteratorVec local_vector_begin, + ForwardIteratorVec local_vector_end, + ForwardIteratorInd local_indices_begin, + VectorType &global_vector) const; + + /** + * This function takes a matrix of local contributions (@p local_matrix) + * corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global matrix. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as @p local_matrix and @p + * local_dof_indices have the same number of elements, this function is + * happy with whatever it is given. + * + * In contrast to the similar function in the DoFAccessor class, this + * function also takes care of constraints, i.e. if one of the elements of + * @p local_dof_indices belongs to a constrained node, then rather than + * writing the corresponding element of @p local_matrix into @p + * global_matrix, the element is distributed to the entries in the global + * matrix to which this particular degree of freedom is constrained. + * + * With this scheme, we never write into rows or columns of constrained + * degrees of freedom. In order to make sure that the resulting matrix can + * still be inverted, we need to do something with the diagonal elements + * corresponding to constrained nodes. Thus, if a degree of freedom in @p + * local_dof_indices is constrained, we distribute the corresponding entries + * in the matrix, but also add the absolute value of the diagonal entry of + * the local matrix to the corresponding entry in the global matrix. + * Assuming the discretized operator is positive definite, this guarantees + * that the diagonal entry is always non-zero, positive, and of the same + * order of magnitude as the other entries of the matrix. On the other hand, + * when solving a source problem $Au=f$ the exact value of the diagonal + * element is not important, since the value of the respective degree of + * freedom will be overwritten by the distribute() call later on anyway. + * + * @note The procedure described above adds an unforeseeable number of + * artificial eigenvalues to the spectrum of the matrix. Therefore, it is + * recommended to use the equivalent function with two local index vectors + * in such a case. + * + * By using this function to distribute local contributions to the global + * object, one saves the call to the condense function after the vectors and + * matrices are fully assembled. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. 
However, the function
+ * call is only thread-safe if the underlying global matrix allows for
+ * simultaneous access and the access is not to rows with the same global
+ * index at the same time. This needs to be made sure from the caller's
+ * site. There is no locking mechanism inside this method to prevent data
+ * races.
+ */
+ template
+ void
+ distribute_local_to_global (const FullMatrix &local_matrix,
+ const std::vector &local_dof_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * Does almost the same as the function above but can treat general
+ * rectangular matrices. The main difference to achieve this is that the
+ * diagonal entries in constrained rows are left untouched instead of being
+ * filled with arbitrary values.
+ *
+ * Since the diagonal entries corresponding to eliminated degrees of freedom
+ * are not set, the result may have a zero eigenvalue if applied to a
+ * square matrix. This has to be considered when solving the resulting
+ * problems. For solving a source problem $Au=f$, it is possible to set the
+ * diagonal entry after building the matrix by a piece of code of the form
+ *
+ * @code
+ * for (unsigned int i=0;i<matrix.m();++i)
+ *   if (matrix.diag_element(i) == 0)
+ *     matrix.diag_element(i) = 1.;
+ * @endcode
+ */
+ template
+ void
+ distribute_local_to_global (const FullMatrix &local_matrix,
+ const std::vector &row_indices,
+ const std::vector &col_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * Does almost the same as the function above for general rectangular
+ * matrices but uses different AffineConstraints objects on the row and
+ * column indices. The convention is that row indices are constrained
+ * according to the calling AffineConstraints *this, whereas
+ * column indices are constrained according to the given AffineConstraints
+ * column_affine_constraints. This function makes it possible to handle
+ * the case where rows and columns of a matrix are represented by different
+ * function spaces with their own enumeration of indices, as e.g. in mixed
+ * finite element problems with separate DoFHandler objects or for flux
+ * matrices between different levels in multigrid methods.
+ *
+ * Like the other method with separate slots for row and column indices,
+ * this method does not add diagonal entries to eliminated degrees of
+ * freedom. See there for a more elaborate description.
+ */
+ template
+ void distribute_local_to_global(
+ const FullMatrix &local_matrix,
+ const std::vector &row_indices,
+ const AffineConstraints &column_affine_constraints,
+ const std::vector &column_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * This function simultaneously writes elements into matrix and vector,
+ * according to the constraints specified by the calling AffineConstraints.
+ * This function can correctly handle inhomogeneous constraints as well. For
+ * the parameter use_inhomogeneities_for_rhs see the documentation in
+ * @ref constraints
+ * module.
+ *
+ * @note This function in itself is thread-safe, i.e., it works properly
+ * also when several threads call it simultaneously. However, the function
+ * call is only thread-safe if the underlying global matrix and vector allow
+ * for simultaneous access and the access is not to rows with the same
+ * global index at the same time. This needs to be made sure from the
+ * caller's site. There is no locking mechanism inside this method to
+ * prevent data races.
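+ *
+ * A minimal sketch of the typical call in an assembly loop; the
+ * cell_matrix, cell_rhs, local_dof_indices, system_matrix, and
+ * system_rhs objects are assumed to exist in the caller:
+ * @code
+ * constraints.distribute_local_to_global (cell_matrix,
+ *                                         cell_rhs,
+ *                                         local_dof_indices,
+ *                                         system_matrix,
+ *                                         system_rhs);
+ * @endcode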
+ */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs = false) const; + + /** + * Do a similar operation as the distribute_local_to_global() function that + * distributes writing entries into a matrix for constrained degrees of + * freedom, except that here we don't write into a matrix but only allocate + * sparsity pattern entries. + * + * As explained in the + * @ref hp_paper "hp paper" + * and in step-27, first allocating a sparsity pattern and later coming back + * and allocating additional entries for those matrix entries that will be + * written to due to the elimination of constrained degrees of freedom + * (using AffineConstraints::condense() ), can be a very expensive procedure. + * It is cheaper to allocate these entries right away without having to do a + * second pass over the sparsity pattern object. This function does exactly + * that. + * + * Because the function only allocates entries in a sparsity pattern, all it + * needs to know are the degrees of freedom that couple to each other. + * Unlike the previous function, no actual values are written, so the second + * input argument is not necessary here. + * + * The third argument to this function, keep_constrained_entries determines + * whether the function shall allocate entries in the sparsity pattern at + * all for entries that will later be set to zero upon condensation of the + * matrix. These entries are necessary if the matrix is built unconstrained, + * and only later condensed. They are not necessary if the matrix is built + * using the distribute_local_to_global() function of this class which + * distributes entries right away when copying a local matrix into a global + * object. The default of this argument is true, meaning to allocate the few + * entries that may later be set to zero. + * + * By default, the function adds entries for all pairs of indices given in + * the first argument to the sparsity pattern (unless + * keep_constrained_entries is false). However, sometimes one would like to + * only add a subset of all of these pairs. In that case, the last argument + * can be used which specifies a boolean mask which of the pairs of indices + * should be considered. If the mask is false for a pair of indices, then no + * entry will be added to the sparsity pattern for this pair, irrespective + * of whether one or both of the indices correspond to constrained degrees + * of freedom. + * + * This function is not typically called from user code, but is used in the + * DoFTools::make_sparsity_pattern() function when passed a constraint + * matrix object. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global sparsity pattern allows + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. 
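+ *
+ * A minimal sketch of the typical call in a loop over cells; the
+ * local_dof_indices vector and the sparsity pattern dsp are assumed to
+ * exist in the caller:
+ * @code
+ * constraints.add_entries_local_to_global (local_dof_indices,
+ *                                          dsp,
+ *                                          false);
+ * @endcode
+ * Here, passing false as the third argument (keep_constrained_entries)
+ * means that no entries are allocated for constrained rows and columns,
+ * which is appropriate if the matrix is later assembled via
+ * distribute_local_to_global().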
+ */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries = true, + const Table<2,bool> &dof_mask = default_empty_table) const; + + /** + * Similar to the other function, but for non-quadratic sparsity patterns. + */ + template + void + add_entries_local_to_global (const std::vector &row_indices, + const std::vector &col_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries = true, + const Table<2,bool> &dof_mask = default_empty_table) const; + + /** + * This function imports values from a global vector (@p global_vector) by + * applying the constraints to a vector of local values, expressed in + * iterator format. In most cases, the local values will be identified by + * the local dof values on a cell. However, as long as the entries in @p + * local_dof_indices indicate reasonable global vector entries, this + * function is happy with whatever it is given. + * + * If one of the elements of @p local_dof_indices belongs to a constrained + * node, then rather than writing the corresponding element of @p + * global_vector into @p local_vector, the constraints are resolved as the + * respective distribute function does, i.e., the local entry is constructed + * from the global entries to which this particular degree of freedom is + * constrained. + * + * In contrast to the similar function get_dof_values in the DoFAccessor + * class, this function does not need the constrained values to be correctly + * set (i.e., distribute to be called). + */ + template + void + get_dof_values (const VectorType &global_vector, + ForwardIteratorInd local_indices_begin, + ForwardIteratorVec local_vector_begin, + ForwardIteratorVec local_vector_end) const; + + /** + * @} + */ + + /** + * @name Dealing with constraints after solving a linear system + * @{ + */ + + /** + * Given a vector, set all constrained degrees of freedom to values so + * that the constraints are satisfied. For example, if the current object + * stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this + * function will read the values of $x_1$ and $x_1$ from the given vector + * and set the element $x_3$ according to this constraints. Similarly, if + * the current object stores the constraint $x_{42}=208$, then this + * function will set the 42nd element of the given vector to 208. + * + * @note If this function is called with a parallel vector @p vec, then the + * vector must not contain ghost elements. + */ + template + void distribute (VectorType &vec) const; + + /** + * @} + */ + + + + /** + * This class represents one line of a constraint matrix. + */ + struct ConstraintLine + { + /** + * A data type in which we store the list of entries that make up the + * homogenous part of a constraint. + */ + typedef std::vector > Entries; + + /** + * Global DoF index of this line. Since only very few lines are stored, + * we can not assume a specific order and have to store the index + * explicitly. + */ + size_type index; + + /** + * Row numbers and values of the entries in this line. + * + * For the reason why we use a vector instead of a map and the + * consequences thereof, the same applies as what is said for + * AffineConstraints::lines. + */ + Entries entries; + + /** + * Value of the inhomogeneity. + */ + double inhomogeneity; + + /** + * This operator is a bit weird and unintuitive: it compares the line + * numbers of two lines. 
We need this to sort the lines; in fact we could + * do this using a comparison predicate. However, this way, it is easier, + * albeit unintuitive since two lines really have no god-given order + * relation. + */ + bool operator < (const ConstraintLine &) const; + + /** + * This operator is likewise weird: it checks whether the line indices of + * the two operands are equal, irrespective of the fact that the contents + * of the line may be different. + */ + bool operator == (const ConstraintLine &) const; + + /** + * Determine an estimate for the memory consumption (in bytes) of this + * object. + */ + std::size_t memory_consumption () const; + + /** + * Support for boost:serialization. + */ + template + void serialize(Archive &ar, const unsigned int) + { + ar &index &entries &inhomogeneity; + } + + }; + + + /** + * Typedef for the iterator type that is used in the LineRange container. + */ + typedef std::vector::const_iterator const_iterator; + + + /** + * Typedef for the return type used by get_lines(). + */ + typedef boost::iterator_range LineRange; + + + /** + * Return a range object containing (const) iterators to all line entries + * stored in the AffineConstraints container. Such a range is useful to + * initialize range-based for loops as supported by C++11. + * + * @return A range object for the half open range [this->begin(), + * this->end()) of line entries. + */ + const LineRange get_lines() const; + + + /** + * Check if the current object is consistent on all processors + * in a distributed computation. + * + * This method checks if all processors agree on the constraints for their + * local lines as given by @p locally_active_dofs. This method is a collective + * operation and will return @p true only if all processors are consistent. + * + * Please supply the owned DoFs per processor as returned by + * DoFHandler::locally_owned_dofs_per_processor() as @p locally_owned_dofs + * and the result of DoFTools::extract_locally_active_dofs() as + * @p locally_active_dofs. The + * former is used to determine ownership of the specific DoF, while the latter + * is used as the set of rows that need to be checked. + * + * If @p verbose is set to @p true, additional debug information is written + * to std::cout. + * + * @note This method exchanges all constraint information of locally active + * lines and is as such slow for large computations and should probably + * only be used in debug mode. We do not check all lines returned by + * get_local_lines() but only the locally active ones, as we allow processors + * to not know about some locally relevant rows. + * + * @return Whether all AffineConstraints objects are consistent. Returns + * the same value on all processors. 
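+ *
+ * A sketch of a typical debug mode check; the dof_handler and
+ * mpi_communicator objects are assumed to exist in the caller:
+ * @code
+ * IndexSet locally_active_dofs;
+ * DoFTools::extract_locally_active_dofs (dof_handler, locally_active_dofs);
+ * const bool consistent = constraints.is_consistent_in_parallel (
+ *   dof_handler.locally_owned_dofs_per_processor(),
+ *   locally_active_dofs,
+ *   mpi_communicator);
+ * @endcode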
+ */ + bool is_consistent_in_parallel(const std::vector &locally_owned_dofs, + const IndexSet &locally_active_dofs, + const MPI_Comm mpi_communicator, + const bool verbose=false) const; + + + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcMatrixIsClosed); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcMatrixNotClosed); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException1 (ExcLineInexistant, + size_type, + << "The specified line " << arg1 + << " does not exist."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException4 (ExcEntryAlreadyExists, + size_type, size_type, double, double, + << "The entry for the indices " << arg1 << " and " + << arg2 << " already exists, but the values " + << arg3 << " (old) and " << arg4 << " (new) differ " + << "by " << (arg4-arg3) << "."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException2 (ExcDoFConstrainedToConstrainedDoF, + int, int, + << "You tried to constrain DoF " << arg1 + << " to DoF " << arg2 + << ", but that one is also constrained. This is not allowed!"); + /** + * Exception. + * + * @ingroup Exceptions + */ + DeclException1 (ExcDoFIsConstrainedFromBothObjects, + size_type, + << "Degree of freedom " << arg1 + << " is constrained from both object in a merge operation."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException1 (ExcDoFIsConstrainedToConstrainedDoF, + size_type, + << "In the given argument a degree of freedom is constrained " + << "to another DoF with number " << arg1 + << ", which however is constrained by this object. This is not" + << " allowed."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException1 (ExcRowNotStoredHere, + size_type, + << "The index set given to this constraint matrix indicates " + << "constraints for degree of freedom " << arg1 + << " should not be stored by this object, but a constraint " + << "is being added."); + + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException2 (ExcColumnNotStoredHere, + size_type, + size_type, + << "The index set given to this constraint matrix indicates " + << "constraints using degree of freedom " << arg2 + << " should not be stored by this object, but a constraint " + << "for degree of freedom " << arg1 <<" uses it."); + + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException2 (ExcIncorrectConstraint, + int, int, + << "While distributing the constraint for DoF " + << arg1 << ", it turns out that one of the processors " + << "who own the " << arg2 + << " degrees of freedom that x_" << arg1 + << " is constrained against does not know about " + << "the constraint on x_" << arg1 + << ". Did you not initialize the AffineConstraints container " + << "with the appropriate locally_relevant set so " + << "that every processor who owns a DoF that constrains " + << "another DoF also knows about this constraint?"); + +private: + + /** + * Store the lines of the matrix. Entries are usually appended in an + * arbitrary order and insertion into a vector is done best at the end, so + * the order is unspecified after all entries are inserted. Sorting of the + * entries takes place when calling the close() function. + * + * We could, instead of using a vector, use an associative array, like a map + * to store the lines. This, however, would mean a much more fragmented heap + * since it allocates many small objects, and would additionally make usage + * of this matrix much slower. 
+ */ + std::vector lines; + + /** + * A list of size_type that contains the position of the ConstraintLine of a + * constrained degree of freedom, or numbers::invalid_size_type if the + * degree of freedom is not constrained. The numbers::invalid_size_type + * return value returns thus whether there is a constraint line for a given + * degree of freedom index. Note that this class has no notion of how many + * degrees of freedom there really are, so if we check whether there is a + * constraint line for a given degree of freedom, then this vector may + * actually be shorter than the index of the DoF we check for. + * + * This field exists since when adding a new constraint line we have to + * figure out whether it already exists. Previously, we would simply walk + * the unsorted list of constraint lines until we either hit the end or + * found it. This algorithm is O(N) if N is the number of constraints, which + * makes it O(N^2) when inserting all constraints. For large problems with + * many constraints, this could easily take 5-10 per cent of the total run + * time. With this field, we can save this time since we find any constraint + * in O(1) time or get to know that it a certain degree of freedom is not + * constrained. + * + * To make things worse, traversing the list of existing constraints + * requires reads from many different places in memory. Thus, in large 3d + * applications, the add_line() function showed up very prominently in the + * overall compute time, mainly because it generated a lot of cache misses. + * This should also be fixed by using the O(1) algorithm to access the + * fields of this array. + * + * The field is useful in a number of other contexts as well, e.g. when one + * needs random access to the constraints as in all the functions that apply + * constraints on the fly while add cell contributions into vectors and + * matrices. + */ + std::vector lines_cache; + + /** + * This IndexSet is used to limit the lines to save in the AffineConstraints + * to a subset. This is necessary, because the lines_cache vector would + * become too big in a distributed calculation. + */ + IndexSet local_lines; + + /** + * Store whether the arrays are sorted. If so, no new entries can be added. + */ + bool sorted; + + /** + * Internal function to calculate the index of line @p line in the vector + * lines_cache using local_lines. + */ + size_type calculate_line_index (const size_type line) const; + + /** + * Return @p true if the weight of an entry (the second element of the pair) + * equals zero. This function is used to delete entries with zero weight. + */ + static bool check_zero_weight (const std::pair &p); + + /** + * Dummy table that serves as default argument for function + * add_entries_local_to_global(). + */ + static const Table<2,bool> default_empty_table; + + /** + * This function actually implements the local_to_global function for + * standard (non-block) matrices. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs, + std::integral_constant) const; + + /** + * This function actually implements the local_to_global function for block + * matrices. 
+ */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs, + std::integral_constant) const; + + /** + * This function actually implements the local_to_global function for + * standard (non-block) sparsity types. + */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + std::integral_constant) const; + + /** + * This function actually implements the local_to_global function for block + * sparsity types. + */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + std::integral_constant) const; + + /** + * Internal helper function for distribute_local_to_global function. + * + * Creates a list of affected global rows for distribution, including the + * local rows where the entries come from. The list is sorted according to + * the global row indices. + */ + void + make_sorted_row_list (const std::vector &local_dof_indices, + internals::GlobalRowsFromLocal &global_rows) const; + + /** + * Internal helper function for add_entries_local_to_global function. + * + * Creates a list of affected rows for distribution without any additional + * information, otherwise similar to the other make_sorted_row_list() + * function. + */ + void + make_sorted_row_list (const std::vector &local_dof_indices, + std::vector &active_dofs) const; + + /** + * Internal helper function for distribute_local_to_global function. + */ + template + typename ProductType::type + resolve_vector_entry (const size_type i, + const internals::GlobalRowsFromLocal &global_rows, + const Vector &local_vector, + const std::vector &local_dof_indices, + const FullMatrix &local_matrix) const; +}; + + + +/* ---------------- template and inline functions ----------------- */ + +inline +AffineConstraints::AffineConstraints (const IndexSet &local_constraints) + : + lines (), + local_lines (local_constraints), + sorted (false) +{ + // make sure the IndexSet is compressed. Otherwise this can lead to crashes + // that are hard to find (only happen in release mode). + // see tests/mpi/affine_constraints_crash_01 + local_lines.compress(); +} + + + +inline +AffineConstraints::AffineConstraints (const AffineConstraints &affine_constraints) + : + Subscriptor (), + lines (affine_constraints.lines), + lines_cache (affine_constraints.lines_cache), + local_lines (affine_constraints.local_lines), + sorted (affine_constraints.sorted) +{} + + +inline +void +AffineConstraints::add_line (const size_type line) +{ + Assert (sorted==false, ExcMatrixIsClosed()); + + // the following can happen when we compute with distributed meshes and dof + // handlers and we constrain a degree of freedom whose number we don't have + // locally. 
if we don't abort here the program will try to allocate several + // terabytes of memory to resize the various arrays below :-) + Assert (line != numbers::invalid_size_type, + ExcInternalError()); + const size_type line_index = calculate_line_index (line); + + // check whether line already exists; it may, in which case we can just quit + if (is_constrained(line)) + return; + + // if necessary enlarge vector of existing entries for cache + if (line_index >= lines_cache.size()) + lines_cache.resize (std::max(2*static_cast(lines_cache.size()), + line_index+1), + numbers::invalid_size_type); + + // push a new line to the end of the list + lines.emplace_back (); + lines.back().index = line; + lines.back().inhomogeneity = 0.; + lines_cache[line_index] = lines.size()-1; +} + + + +inline +void +AffineConstraints::add_entry (const size_type line, + const size_type column, + const double value) +{ + Assert (sorted==false, ExcMatrixIsClosed()); + Assert (line != column, + ExcMessage ("Can't constrain a degree of freedom to itself")); + + // Ensure that the current line is present in the cache: + const size_type line_index = calculate_line_index(line); + Assert (line_index < lines_cache.size(), + ExcMessage("The current AffineConstraints does not contain the line " + "for the current entry. Call AffineConstraints::add_line " + "before calling this function.")); + + // if in debug mode, check whether an entry for this column already exists + // and if it's the same as the one entered at present + // + // in any case: exit the function if an entry for this column already + // exists, since we don't want to enter it twice + Assert (lines_cache[line_index] != numbers::invalid_size_type, + ExcInternalError()); + Assert (!local_lines.size() || local_lines.is_element(column), + ExcColumnNotStoredHere(line, column)); + ConstraintLine *line_ptr = &lines[lines_cache[line_index]]; + Assert (line_ptr->index == line, ExcInternalError()); + for (ConstraintLine::Entries::const_iterator + p=line_ptr->entries.begin(); + p != line_ptr->entries.end(); ++p) + if (p->first == column) + { + Assert (std::fabs(p->second - value) < 1.e-14, + ExcEntryAlreadyExists(line, column, p->second, value)); + return; + } + + line_ptr->entries.emplace_back (column, value); +} + + + +inline +void +AffineConstraints::set_inhomogeneity (const size_type line, + const double value) +{ + const size_type line_index = calculate_line_index(line); + Assert( line_index < lines_cache.size() && + lines_cache[line_index] != numbers::invalid_size_type, + ExcMessage("call add_line() before calling set_inhomogeneity()")); + Assert(lines_cache[line_index] < lines.size(), ExcInternalError()); + ConstraintLine *line_ptr = &lines[lines_cache[line_index]]; + line_ptr->inhomogeneity = value; +} + + + +inline +types::global_dof_index +AffineConstraints::n_constraints () const +{ + return lines.size(); +} + + + +inline +bool +AffineConstraints::is_constrained (const size_type index) const +{ + const size_type line_index = calculate_line_index(index); + return ((line_index < lines_cache.size()) + && + (lines_cache[line_index] != numbers::invalid_size_type)); +} + + + +inline +bool +AffineConstraints::is_inhomogeneously_constrained (const size_type index) const +{ + // check whether the entry is constrained. 
could use is_constrained, but + // that means computing the line index twice + const size_type line_index = calculate_line_index(index); + if (line_index >= lines_cache.size() || + lines_cache[line_index] == numbers::invalid_size_type) + return false; + else + { + Assert(lines_cache[line_index] < lines.size(), ExcInternalError()); + return !(lines[lines_cache[line_index]].inhomogeneity == 0); + } +} + + + +inline +const std::vector > * +AffineConstraints::get_constraint_entries (const size_type line) const +{ + // check whether the entry is constrained. could use is_constrained, but + // that means computing the line index twice + const size_type line_index = calculate_line_index(line); + if (line_index >= lines_cache.size() || + lines_cache[line_index] == numbers::invalid_size_type) + return nullptr; + else + return &lines[lines_cache[line_index]].entries; +} + + + +inline +double +AffineConstraints::get_inhomogeneity (const size_type line) const +{ + // check whether the entry is constrained. could use is_constrained, but + // that means computing the line index twice + const size_type line_index = calculate_line_index(line); + if (line_index >= lines_cache.size() || + lines_cache[line_index] == numbers::invalid_size_type) + return 0; + else + return lines[lines_cache[line_index]].inhomogeneity; +} + + + +inline types::global_dof_index +AffineConstraints::calculate_line_index (const size_type line) const +{ + //IndexSet is unused (serial case) + if (!local_lines.size()) + return line; + + Assert(local_lines.is_element(line), + ExcRowNotStoredHere(line)); + + return local_lines.index_within_set(line); +} + + + +inline bool +AffineConstraints::can_store_line (size_type line_index) const +{ + return !local_lines.size() || local_lines.is_element(line_index); +} + + + +inline +const IndexSet & +AffineConstraints::get_local_lines () const +{ + return local_lines; +} + + + +template +inline +void AffineConstraints::distribute_local_to_global ( + const size_type index, + const double value, + VectorType &global_vector) const +{ + Assert (lines.empty() || sorted == true, ExcMatrixNotClosed()); + + if (is_constrained(index) == false) + global_vector(index) += value; + else + { + const ConstraintLine &position = + lines[lines_cache[calculate_line_index(index)]]; + for (size_type j=0; j +inline +void AffineConstraints::distribute_local_to_global ( + ForwardIteratorVec local_vector_begin, + ForwardIteratorVec local_vector_end, + ForwardIteratorInd local_indices_begin, + VectorType &global_vector) const +{ + Assert (lines.empty() || sorted == true, ExcMatrixNotClosed()); + for ( ; local_vector_begin != local_vector_end; + ++local_vector_begin, ++local_indices_begin) + { + if (is_constrained(*local_indices_begin) == false) + internal::ElementAccess::add(*local_vector_begin, + *local_indices_begin, global_vector); + else + { + const ConstraintLine &position = + lines[lines_cache[calculate_line_index(*local_indices_begin)]]; + for (size_type j=0; j::add((*local_vector_begin) * position.entries[j].second, + position.entries[j].first, + global_vector); + } + } +} + + +template +inline +void +AffineConstraints::distribute_local_to_global ( + const InVector &local_vector, + const std::vector &local_dof_indices, + OutVector &global_vector) const +{ + Assert (local_vector.size() == local_dof_indices.size(), + ExcDimensionMismatch(local_vector.size(), local_dof_indices.size())); + distribute_local_to_global (local_vector.begin(), local_vector.end(), + local_dof_indices.begin(), global_vector); +} + + + 
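+// An illustrative usage sketch (editorial addition, not part of the class
+// interface): during assembly one typically computes a cell contribution and
+// lets the overloads above write it into the global right hand side with all
+// constraints resolved. The names `n_dofs`, `dofs_per_cell`, `cell_rhs`,
+// `local_dof_indices`, and `constraints` are assumed to be provided by the
+// surrounding application code:
+//
+//   AffineConstraints constraints;
+//   // ... add_line() / add_entry() / set_inhomogeneity() calls ...
+//   constraints.close();
+//
+//   Vector<double> global_rhs(n_dofs);
+//   Vector<double> cell_rhs(dofs_per_cell);
+//   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
+//   // ... for each cell: fill cell_rhs, query local_dof_indices ...
+//   constraints.distribute_local_to_global(cell_rhs, local_dof_indices,
+//                                          global_rhs);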
+template +inline +void AffineConstraints::get_dof_values (const VectorType &global_vector, + ForwardIteratorInd local_indices_begin, + ForwardIteratorVec local_vector_begin, + ForwardIteratorVec local_vector_end) const +{ + Assert (lines.empty() || sorted == true, ExcMatrixNotClosed()); + for ( ; local_vector_begin != local_vector_end; + ++local_vector_begin, ++local_indices_begin) + { + if (is_constrained(*local_indices_begin) == false) + *local_vector_begin = global_vector(*local_indices_begin); + else + { + const ConstraintLine &position = + lines[lines_cache[calculate_line_index(*local_indices_begin)]]; + typename VectorType::value_type value = position.inhomogeneity; + for (size_type j=0; j class BlockMatrixBase; +template class BlockSparsityPatternBase; +template class BlockSparseMatrixEZ; + +/** + * A class that can be used to determine whether a given type is a block + * matrix type or not. For example, + * @code + * IsBlockMatrix >::value + * @endcode + * has the value false, whereas + * @code + * IsBlockMatrix >::value + * @endcode + * is true. This is sometimes useful in template contexts where we may want to + * do things differently depending on whether a template type denotes a + * regular or a block matrix type. + * + * @see + * @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2009 + */ +template +struct IsBlockMatrix +{ +private: + struct yes_type + { + char c[1]; + }; + struct no_type + { + char c[2]; + }; + + /** + * Overload returning true if the class is derived from BlockMatrixBase, + * which is what block matrices do (with the exception of + * BlockSparseMatrixEZ). + */ + template + static yes_type check_for_block_matrix (const BlockMatrixBase *); + + /** + * Overload returning true if the class is derived from + * BlockSparsityPatternBase, which is what block sparsity patterns do. + */ + template + static yes_type check_for_block_matrix (const BlockSparsityPatternBase *); + + /** + * Overload for BlockSparseMatrixEZ, which is the only block matrix not + * derived from BlockMatrixBase at the time of writing this class. + */ + template + static yes_type check_for_block_matrix (const BlockSparseMatrixEZ *); + + /** + * Catch all for all other potential matrix types that are not block + * matrices. + */ + static no_type check_for_block_matrix (...); + +public: + /** + * A statically computable value that indicates whether the template + * argument to this class is a block matrix (in fact whether the type is + * derived from BlockMatrixBase). + */ + static const bool value = (sizeof(check_for_block_matrix + ((MatrixType *)nullptr)) + == + sizeof(yes_type)); +}; + + +// instantiation of the static member +template +const bool IsBlockMatrix::value; + + +template +inline +void +AffineConstraints:: +distribute_local_to_global (const FullMatrix &local_matrix, + const std::vector &local_dof_indices, + MatrixType &global_matrix) const +{ + // create a dummy and hand on to the function actually implementing this + // feature in the cm.templates.h file. 
+ Vector dummy(0); + distribute_local_to_global (local_matrix, dummy, local_dof_indices, + global_matrix, dummy, false, + std::integral_constant::value>()); +} + + + + +template +inline +void +AffineConstraints:: +distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs) const +{ + // enter the internal function with the respective block information set, + // the actual implementation follows in the cm.templates.h file. + distribute_local_to_global (local_matrix, local_vector, local_dof_indices, + global_matrix, global_vector, use_inhomogeneities_for_rhs, + std::integral_constant::value>()); +} + + + + +template +inline +void +AffineConstraints:: +add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask) const +{ + // enter the internal function with the respective block information set, + // the actual implementation follows in the cm.templates.h file. + add_entries_local_to_global (local_dof_indices, sparsity_pattern, + keep_constrained_entries, dof_mask, + std::integral_constant::value>()); +} + + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h new file mode 100644 index 0000000000..db9c1e204e --- /dev/null +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -0,0 +1,2853 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 1999 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#ifndef dealii_affine_constraints_templates_h +#define dealii_affine_constraints_templates_h + + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +DEAL_II_NAMESPACE_OPEN + + +template +void +AffineConstraints::condense (SparseMatrix &uncondensed) const +{ + Vector dummy (0); + condense (uncondensed, dummy); +} + + + +template +void +AffineConstraints::condense (BlockSparseMatrix &uncondensed) const +{ + BlockVector dummy (0); + condense (uncondensed, dummy); +} + + + +template +void +AffineConstraints::condense (const VectorType &vec_ghosted, + VectorType &vec) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + + // if this is called with different arguments, we need to copy the data over: + if (&vec != &vec_ghosted) + vec = vec_ghosted; + + // distribute all entries, and set them to zero. do so in + // two loops because in the first one we need to add to elements + // and in the second one we need to set elements to zero. 
for + // parallel vectors, this can only work if we can put a compress() + // in between, but we don't want to call compress() twice per entry + for (std::vector::const_iterator + constraint_line = lines.begin(); + constraint_line!=lines.end(); ++constraint_line) + { + // in case the constraint is + // inhomogeneous, this function is not + // appropriate. Throw an exception. + Assert (constraint_line->inhomogeneity == 0., + ExcMessage ("Inhomogeneous constraint cannot be condensed " + "without any matrix specified.")); + + const typename VectorType::value_type old_value = vec_ghosted(constraint_line->index); + for (size_type q=0; q!=constraint_line->entries.size(); ++q) + if (vec.in_local_range(constraint_line->entries[q].first) == true) + vec(constraint_line->entries[q].first) + += (static_cast + (old_value) * + constraint_line->entries[q].second); + } + + vec.compress(VectorOperation::add); + + for (std::vector::const_iterator + constraint_line = lines.begin(); + constraint_line!=lines.end(); ++constraint_line) + if (vec.in_local_range(constraint_line->index) == true) + vec(constraint_line->index) = 0.; + + vec.compress(VectorOperation::insert); +} + + + +template +void +AffineConstraints::condense (VectorType &vec) const +{ + condense(vec, vec); +} + + + +template +void +AffineConstraints::condense (SparseMatrix &uncondensed, + VectorType &vec) const +{ + // check whether we work on real vectors + // or we just used a dummy when calling + // the other function above. + const bool use_vectors = vec.size() == 0 ? false : true; + + const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern (); + + Assert (sorted == true, ExcMatrixNotClosed()); + Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed()); + Assert (sparsity.n_rows() == sparsity.n_cols(), + ExcNotQuadratic()); + if (use_vectors == true) + AssertDimension (vec.size(), sparsity.n_rows()); + + double average_diagonal = 0; + for (size_type i=0; i distribute (sparsity.n_rows(), + numbers::invalid_size_type); + + for (size_type c=0; c::iterator + entry = uncondensed.begin(row); + entry != uncondensed.end(row); ++entry) + { + const size_type column = entry->column(); + + // end of row reached? + // this should not + // happen, since we only + // operate on compressed + // matrices! + Assert (column != SparsityPattern::invalid_entry, + ExcMatrixNotClosed()); + + if (distribute[column] != numbers::invalid_size_type) + // distribute entry at + // regular row @p row + // and irregular column + // sparsity.get_column_numbers()[j]; + // set old entry to + // zero + { + for (size_type q=0; + q!=lines[distribute[column]].entries.size(); ++q) + { + // need a temporary variable to avoid errors like + // no known conversion from 'complex::type>' to 'const complex' for 3rd argument + number v = static_cast(entry->value()); + v *=lines[distribute[column]].entries[q].second; + uncondensed.add (row, + lines[distribute[column]].entries[q].first, + v); + } + + // need to subtract this element from the + // vector. 
this corresponds to an + // explicit elimination in the respective + // row of the inhomogeneous constraint in + // the matrix with Gauss elimination + if (use_vectors == true) + vec(row) -= + static_cast(entry->value()) * lines[distribute[column]].inhomogeneity; + + // set old value to zero + entry->value() = 0.; + } + } + } + else + // row must be distributed + { + for (typename SparseMatrix::iterator + entry = uncondensed.begin(row); + entry != uncondensed.end(row); ++entry) + { + const size_type column = entry->column(); + + // end of row reached? + // this should not + // happen, since we only + // operate on compressed + // matrices! + Assert (column != SparsityPattern::invalid_entry, + ExcMatrixNotClosed()); + + if (distribute[column] == numbers::invalid_size_type) + // distribute entry at + // irregular row + // @p row and regular + // column + // column. set + // old entry to zero + { + for (size_type q=0; + q!=lines[distribute[row]].entries.size(); ++q) + { + // need a temporary variable to avoid errors like + // no known conversion from 'complex::type>' to 'const complex' for 3rd argument + number v = static_cast(entry->value()); + v *= lines[distribute[row]].entries[q].second; + uncondensed.add (lines[distribute[row]].entries[q].first, + column, + v); + } + + // set old entry to zero + entry->value() = 0.; + } + else + // distribute entry at + // irregular row @p row and + // irregular column + // @p column set old entry + // to one on main + // diagonal, zero otherwise + { + for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p) + { + for (size_type q=0; + q!=lines[distribute[column]].entries.size(); ++q) + { + // need a temporary variable to avoid errors like + // no known conversion from 'complex::type>' to 'const complex' for 3rd argument + number v = static_cast(entry->value()); + v *= lines[distribute[row]].entries[p].second * + lines[distribute[column]].entries[q].second; + uncondensed.add (lines[distribute[row]].entries[p].first, + lines[distribute[column]].entries[q].first, + v); + } + + if (use_vectors == true) + vec(lines[distribute[row]].entries[p].first) -= + static_cast(entry->value()) * lines[distribute[row]].entries[p].second * + lines[distribute[column]].inhomogeneity; + } + + // set old entry to correct + // value + entry->value() = (row == column ? average_diagonal : 0. ); + } + } + + // take care of vector + if (use_vectors == true) + { + for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q) + vec(lines[distribute[row]].entries[q].first) + += (vec(row) * lines[distribute[row]].entries[q].second); + + vec(lines[distribute[row]].index) = 0.; + } + } + } +} + + + +template +void +AffineConstraints::condense (BlockSparseMatrix &uncondensed, + BlockVectorType &vec) const +{ + // check whether we work on real vectors + // or we just used a dummy when calling + // the other function above. + const bool use_vectors = vec.n_blocks() == 0 ? 
false : true; + + const size_type blocks = uncondensed.n_block_rows(); + + const BlockSparsityPattern & + sparsity = uncondensed.get_sparsity_pattern (); + + Assert (sorted == true, ExcMatrixNotClosed()); + Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed()); + Assert (sparsity.n_rows() == sparsity.n_cols(), + ExcNotQuadratic()); + Assert (sparsity.n_block_rows() == sparsity.n_block_cols(), + ExcNotQuadratic()); + Assert (sparsity.n_block_rows() == sparsity.n_block_cols(), + ExcNotQuadratic()); + Assert (sparsity.get_column_indices() == sparsity.get_row_indices(), + ExcNotQuadratic()); + + if (use_vectors == true) + { + AssertDimension (vec.size(), sparsity.n_rows()); + AssertDimension (vec.n_blocks(), sparsity.n_block_rows()); + } + + double average_diagonal = 0; + for (size_type b=0; b distribute (sparsity.n_rows(), + numbers::invalid_size_type); + + for (size_type c=0; c + block_index = index_mapping.global_to_local(row); + const size_type block_row = block_index.first; + + if (distribute[row] == numbers::invalid_size_type) + // regular line. loop over + // all columns and see + // whether this column must + // be distributed + { + + // to loop over all entries + // in this row, we have to + // loop over all blocks in + // this blockrow and the + // corresponding row + // therein + for (size_type block_col=0; block_col::iterator + entry = uncondensed.block(block_row, block_col).begin(block_index.second); + entry != uncondensed.block(block_row, block_col).end(block_index.second); + ++entry) + { + const size_type global_col + = index_mapping.local_to_global(block_col,entry->column()); + + if (distribute[global_col] != numbers::invalid_size_type) + // distribute entry at + // regular row @p row + // and irregular column + // global_col; set old + // entry to zero + { + const double old_value = entry->value (); + + for (size_type q=0; + q!=lines[distribute[global_col]].entries.size(); ++q) + uncondensed.add (row, + lines[distribute[global_col]].entries[q].first, + old_value * + lines[distribute[global_col]].entries[q].second); + + // need to subtract this element from the + // vector. this corresponds to an + // explicit elimination in the respective + // row of the inhomogeneous constraint in + // the matrix with Gauss elimination + if (use_vectors == true) + vec(row) -= entry->value() * + lines[distribute[global_col]].inhomogeneity; + + entry->value() = 0.; + } + } + } + } + else + { + // row must be + // distributed. split the + // whole row into the + // chunks defined by the + // blocks + for (size_type block_col=0; block_col::iterator + entry = uncondensed.block(block_row, block_col).begin(block_index.second); + entry != uncondensed.block(block_row, block_col).end(block_index.second); + ++entry) + { + const size_type global_col + = index_mapping.local_to_global (block_col, entry->column()); + + if (distribute[global_col] == + numbers::invalid_size_type) + // distribute + // entry at + // irregular + // row @p row + // and regular + // column + // global_col. 
set + // old entry to + // zero + { + const double old_value = entry->value(); + + for (size_type q=0; + q!=lines[distribute[row]].entries.size(); ++q) + uncondensed.add (lines[distribute[row]].entries[q].first, + global_col, + old_value * + lines[distribute[row]].entries[q].second); + + entry->value() = 0.; + } + else + // distribute entry at + // irregular row @p row + // and irregular column + // @p global_col set old + // entry to one if on + // main diagonal, zero + // otherwise + { + const double old_value = entry->value (); + + for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p) + { + for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q) + uncondensed.add (lines[distribute[row]].entries[p].first, + lines[distribute[global_col]].entries[q].first, + old_value * + lines[distribute[row]].entries[p].second * + lines[distribute[global_col]].entries[q].second); + + if (use_vectors == true) + vec(lines[distribute[row]].entries[p].first) -= + old_value * lines[distribute[row]].entries[p].second * + lines[distribute[global_col]].inhomogeneity; + } + + entry->value() = (row == global_col ? average_diagonal : 0. ); + } + } + } + + // take care of vector + if (use_vectors == true) + { + for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q) + vec(lines[distribute[row]].entries[q].first) + += (vec(row) * lines[distribute[row]].entries[q].second); + + vec(lines[distribute[row]].index) = 0.; + } + } + } +} + + +//TODO: I'm sure the following could be made more elegant by using a bit of +//introspection using static member variables of the various vector +//classes to dispatch between the different functions, rather than using +//knowledge of the individual types + +// number of functions to select the right implementation for set_zero(). +namespace internal +{ + namespace AffineConstraintsImplementation + { + namespace + { + typedef types::global_dof_index size_type; + + template + void set_zero_parallel(const std::vector &cm, + VectorType &vec, + size_type shift = 0) + { + Assert(!vec.has_ghost_elements(), ExcInternalError()); + IndexSet locally_owned = vec.locally_owned_elements(); + for (typename std::vector::const_iterator it = cm.begin(); + it != cm.end(); ++it) + { + // If shift>0 then we are working on a part of a BlockVector + // so vec(i) is actually the global entry i+shift. + // We first make sure the line falls into the range of vec, + // then check if is part of the local part of the vector, before + // finally setting it to 0. + if ((*it)::set(0., idx, vec); + } + } + + template + void set_zero_parallel(const std::vector &cm, LinearAlgebra::distributed::Vector &vec, size_type shift = 0) + { + for (typename std::vector::const_iterator it = cm.begin(); + it != cm.end(); ++it) + { + // If shift>0 then we are working on a part of a BlockVector + // so vec(i) is actually the global entry i+shift. + // We first make sure the line falls into the range of vec, + // then check if is part of the local part of the vector, before + // finally setting it to 0. 
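+          // Worked example (editorial note): if this vector is block 1 of a
+          // BlockVector whose block 0 holds 100 entries, then shift == 100
+          // and a constrained global index of 105 maps to local entry
+          // idx = 105 - 100 = 5 of this block; indices below 100 belong to an
+          // earlier block and are skipped.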
+ if ((*it) + void set_zero_in_parallel(const std::vector &cm, + VectorType &vec, + std::integral_constant) + { + set_zero_parallel(cm, vec, 0); + } + + // in parallel for BlockVectors + template + void set_zero_in_parallel(const std::vector &cm, + VectorType &vec, + std::integral_constant) + { + size_type start_shift = 0; + for (size_type j=0; j + void set_zero_serial(const std::vector &cm, + VectorType &vec) + { + for (typename std::vector::const_iterator it = cm.begin(); + it != cm.end(); ++it) + vec(*it) = 0.; + } + + template + void set_zero_all(const std::vector &cm, + VectorType &vec) + { + set_zero_in_parallel(cm, vec, std::integral_constant::value>()); + vec.compress(VectorOperation::insert); + } + + + template + void set_zero_all(const std::vector &cm, + dealii::Vector &vec) + { + set_zero_serial(cm, vec); + } + + template + void set_zero_all(const std::vector &cm, + dealii::BlockVector &vec) + { + set_zero_serial(cm, vec); + } + } + } +} + + +template +void +AffineConstraints::set_zero (VectorType &vec) const +{ + // since we lines is a private member, we cannot pass it to the functions + // above. therefore, copy the content which is cheap + std::vector constrained_lines(lines.size()); + for (unsigned int i=0; i +void +AffineConstraints:: +distribute_local_to_global (const Vector &local_vector, + const std::vector &local_dof_indices, + VectorType &global_vector, + const FullMatrix &local_matrix) const +{ + distribute_local_to_global(local_vector,local_dof_indices,local_dof_indices, global_vector, local_matrix, true); +} + + + +template +void +AffineConstraints:: +distribute_local_to_global (const Vector &local_vector, + const std::vector &local_dof_indices_row, + const std::vector &local_dof_indices_col, + VectorType &global_vector, + const FullMatrix &local_matrix, + bool diagonal) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + AssertDimension (local_vector.size(), local_dof_indices_row.size()); + AssertDimension (local_matrix.m(), local_dof_indices_row.size()); + AssertDimension (local_matrix.n(), local_dof_indices_col.size()); + + // diagonal checks if we have only one index set (if both are equal + // diagonal should be set to true). + // If true we do both, assembly of the right hand side (next lines) + // and (see further below) modifications of the right hand side + // according to the inhomogeneous constraints. + // Otherwise we only modify the right hand side according to + // local_matrix and the inhomogeneous constraints, and omit the vector add. 
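+  // Schematically (editorial summary), with g denoting the vector of
+  // inhomogeneities of the constrained degrees of freedom:
+  //   diagonal == true :  rhs_global += rhs_local - A_local * g
+  //   diagonal == false:  rhs_global +=          - A_local * g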
+ + const size_type m_local_dofs = local_dof_indices_row.size(); + const size_type n_local_dofs = local_dof_indices_col.size(); + if (lines.empty()) + { + if (diagonal) + global_vector.add(local_dof_indices_row, local_vector); + } + else + for (size_type i=0; iinhomogeneity; + if (val != 0) + for (size_type j=0; jentries.size(); ++j) + { + Assert (!(!local_lines.size() + || local_lines.is_element(position->entries[j].first)) + || is_constrained(position->entries[j].first) == false, + ExcMessage ("Tried to distribute to a fixed dof.")); + global_vector(position->entries[j].first) + += local_vector(i) * position->entries[j].second; + } + } + } +} + + + +namespace internal +{ + namespace + { + // create an output vector that consists of the input vector's locally owned + // elements plus some ghost elements that need to be imported from elsewhere + // + // this is an operation that is different for all vector types and so we + // need a few overloads +#ifdef DEAL_II_WITH_TRILINOS + void + import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec, + const IndexSet &/*locally_owned_elements*/, + const IndexSet &needed_elements, + TrilinosWrappers::MPI::Vector &output, + const std::integral_constant /*is_block_vector*/) + { + Assert(!vec.has_ghost_elements(), + ExcGhostsPresent()); +#ifdef DEAL_II_WITH_MPI + const Epetra_MpiComm *mpi_comm + = dynamic_cast(&vec.trilinos_vector().Comm()); + + Assert (mpi_comm != nullptr, ExcInternalError()); + output.reinit (needed_elements, mpi_comm->GetMpiComm()); +#else + output.reinit (needed_elements, MPI_COMM_SELF); +#endif + output = vec; + } +#endif + +#ifdef DEAL_II_WITH_PETSC + void + import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec, + const IndexSet &locally_owned_elements, + const IndexSet &needed_elements, + PETScWrappers::MPI::Vector &output, + const std::integral_constant /*is_block_vector*/) + { + output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator()); + output = vec; + } +#endif + + template + void + import_vector_with_ghost_elements (const LinearAlgebra::distributed::Vector &vec, + const IndexSet &locally_owned_elements, + const IndexSet &needed_elements, + LinearAlgebra::distributed::Vector &output, + const std::integral_constant /*is_block_vector*/) + { + // TODO: the in vector might already have all elements. 
need to find a + // way to efficiently avoid the copy then + const_cast&>(vec).zero_out_ghosts(); + output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator()); + output = vec; + output.update_ghost_values(); + } + + + // all other vector non-block vector types are sequential and we should + // not have this function called at all -- so throw an exception + template + void + import_vector_with_ghost_elements (const Vector &/*vec*/, + const IndexSet &/*locally_owned_elements*/, + const IndexSet &/*needed_elements*/, + Vector &/*output*/, + const std::integral_constant /*is_block_vector*/) + { + Assert (false, ExcMessage ("We shouldn't even get here!")); + } + + + // for block vectors, simply dispatch to the individual blocks + template + void + import_vector_with_ghost_elements (const VectorType &vec, + const IndexSet &locally_owned_elements, + const IndexSet &needed_elements, + VectorType &output, + const std::integral_constant /*is_block_vector*/) + { + output.reinit (vec.n_blocks()); + + types::global_dof_index block_start = 0; + for (unsigned int b=0; b()); + block_start += vec.block(b).size(); + } + + output.collect_sizes (); + } + } +} + + +template +void +AffineConstraints::distribute (VectorType &vec) const +{ + Assert (sorted==true, ExcMatrixNotClosed()); + + // if the vector type supports parallel storage and if the vector actually + // does store only part of the vector, distributing is slightly more + // complicated. we might be able to skip the complicated part if one + // processor owns everything and pretend that this is a sequential vector, + // but it is difficult for the other processors to know whether they should + // not do anything or if other processors will create a temporary vector, + // exchange data (requiring communication, maybe even with the processors + // that do not own anything because of that particular parallel model), and + // call compress() finally. the first case here is for the complicated case, + // the last else is for the simple case (sequential vector) + const IndexSet vec_owned_elements = vec.locally_owned_elements(); + + if ( dealii::is_serial_vector< VectorType >::value == false ) + { + // This processor owns only part of the vector. one may think that + // every processor should be able to simply communicate those elements + // it owns and for which it knows that they act as sources to constrained + // DoFs to the owner of these DoFs. This would lead to a scheme where all + // we need to do is to add some local elements to (possibly non-local) ones + // and then call compress(). + // + // Alas, this scheme does not work as evidenced by the disaster of bug #51, + // see http://code.google.com/p/dealii/issues/detail?id=51 and the + // reversion of one attempt that implements this in r29662. Rather, we + // need to get a vector that has all the *sources* or constraints we + // own locally, possibly as ghost vector elements, then read from them, + // and finally throw away the ghosted vector. Implement this in the following. 
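+      // The implementation proceeds in three steps (editorial summary):
+      // (1) collect in needed_elements the indices of all constraint sources
+      //     that we do not own ourselves, (2) import a ghosted copy of the
+      //     vector that also contains those sources, and (3) evaluate every
+      //     locally owned constraint from the ghosted copy and write the
+      //     result back into the output vector, followed by compress().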
+ IndexSet needed_elements = vec_owned_elements; + + typedef std::vector::const_iterator constraint_iterator; + for (constraint_iterator it = lines.begin(); + it != lines.end(); ++it) + if (vec_owned_elements.is_element(it->index)) + for (unsigned int i=0; ientries.size(); ++i) + if (!vec_owned_elements.is_element(it->entries[i].first)) + needed_elements.add_index(it->entries[i].first); + + VectorType ghosted_vector; + internal::import_vector_with_ghost_elements (vec, + vec_owned_elements, needed_elements, + ghosted_vector, + std::integral_constant::value>()); + + for (constraint_iterator it = lines.begin(); + it != lines.end(); ++it) + if (vec_owned_elements.is_element(it->index)) + { + typename VectorType::value_type + new_value = it->inhomogeneity; + for (unsigned int i=0; ientries.size(); ++i) + new_value += (static_cast + (internal::ElementAccess::get( + ghosted_vector, it->entries[i].first)) * + it->entries[i].second); + AssertIsFinite(new_value); + internal::ElementAccess::set(new_value, it->index, vec); + } + + // now compress to communicate the entries that we added to + // and that weren't to local processors to the owner + // + // this shouldn't be strictly necessary but it probably doesn't + // hurt either + vec.compress (VectorOperation::insert); + } + else + // purely sequential vector (either because the type doesn't + // support anything else or because it's completely stored + // locally) + { + std::vector::const_iterator next_constraint = lines.begin(); + for (; next_constraint != lines.end(); ++next_constraint) + { + // fill entry in line + // next_constraint.index by adding the + // different contributions + typename VectorType::value_type + new_value = next_constraint->inhomogeneity; + for (unsigned int i=0; ientries.size(); ++i) + new_value += (static_cast + (internal::ElementAccess::get( + vec, next_constraint->entries[i].first))* + next_constraint->entries[i].second); + AssertIsFinite(new_value); + internal::ElementAccess::set(new_value, next_constraint->index, + vec); + } + } +} + + + +// Some helper definitions for the local_to_global functions. +namespace internals +{ + typedef types::global_dof_index size_type; + + // this struct contains all the information we need to store about each of + // the global entries (global_row): are they obtained directly by some local + // entry (local_row) or some constraints (constraint_position). This is not + // directly used in the user code, but accessed via the GlobalRowsFromLocal. + // + // The actions performed here correspond to reshaping the constraint + // information from global degrees of freedom to local ones (i.e., + // cell-related DoFs), and also transforming the constraint information from + // compressed row storage (each local dof that is constrained has a list of + // constraint entries associated to it) into compressed column storage based + // on the cell-related DoFs (we have a list of global degrees of freedom, + // and to each we have a list of local rows where the entries come from). To + // increase the speed, we additionally store whether an entry is generated + // directly from the local degrees of freedom or whether it comes from a + // constraint. 
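+  // Example (editorial illustration): if local dof 3 of a cell carries the
+  // unconstrained global index 42, it is recorded as {global_row = 42,
+  // local_row = 3}. If instead x_42 were constrained to 0.5*x_10 + 0.5*x_11,
+  // global rows 10 and 11 would each receive an entry remembering the
+  // originating local row 3 and the weight 0.5 via constraint_position.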
+ struct Distributing + { + Distributing (const size_type global_row = numbers::invalid_size_type, + const size_type local_row = numbers::invalid_size_type); + Distributing (const Distributing &in); + Distributing &operator = (const Distributing &in); + bool operator < (const Distributing &in) const + { + return global_row > >, but tuned so that + // frequent memory allocation for each entry is avoided. The data is put + // into a std::vector > and the row length is kept + // fixed at row_length. Both the number of rows and the row length can + // change is this structure is filled. In that case, the data is + // rearranged. This is not directly used in the user code, but accessed via + // the GlobalRowsFromLocal. + struct DataCache + { + DataCache () + : + row_length (8) + {} + + void reinit () + { + individual_size.resize(0); + data.resize(0); + } + + size_type insert_new_index (const std::pair &pair) + { + Assert(row_length > 0, ExcInternalError()); + const unsigned int index = individual_size.size(); + individual_size.push_back(1); + data.resize(individual_size.size()*row_length); + data[index*row_length] = pair; + individual_size[index] = 1; + return index; + } + + void append_index (const size_type index, + const std::pair &pair) + { + AssertIndexRange (index, individual_size.size()); + const size_type my_length = individual_size[index]; + if (my_length == row_length) + { + AssertDimension(data.size(), individual_size.size()*row_length); + // no space left in this row, need to double row_length and + // rearrange the data items. Move all items to the right except the + // first one, starting at the back. Since individual_size contains + // at least one element when we get here, subtracting 1 works fine. + data.resize(2*data.size()); + for (size_type i=individual_size.size()-1; i>0; --i) + { + const auto ptr = data.data(); + std::move_backward(ptr + i*row_length, + ptr + i*row_length + individual_size[i], + ptr + i*2*row_length + individual_size[i]); + } + row_length *= 2; + } + data[index*row_length+my_length] = pair; + individual_size[index] = my_length + 1; + } + + size_type + get_size (const size_type index) const + { + return individual_size[index]; + } + + const std::pair * + get_entry (const size_type index) const + { + return &data[index*row_length]; + } + + size_type row_length; + + std::vector > data; + + std::vector individual_size; + }; + + + + // collects all the global rows from a local contribution (cell) and their + // origin (direct/constraint). this is basically a vector consisting of + // "Distributing" structs using access via the DataCache. Provides some + // specialized sort and insert functions. + // + // in case there are no constraints, this is basically a list of pairs + // with the first index being the global index and the second + // index the local index. The list is sorted with respect to the global + // index. + // + // in case there are constraints, a global dof might get a contribution also + // because it gets data from a constrained dof. This means that a global dof + // might also have indirect contributions from a local dof via a constraint, + // besides the direct ones. 
+ // + // The actions performed here correspond to reshaping the constraint + // information from global degrees of freedom to local ones (i.e., + // cell-related DoFs), and also transforming the constraint information from + // compressed row storage (each local dof that is constrained has a list of + // constraint entries associated to it) into compressed column storage based + // on the cell-related DoFs (we have a list of global degrees of freedom, + // and to each we have a list of local rows where the entries come from). To + // increase the speed, we additionally store whether an entry is generated + // directly from the local degrees of freedom or whether it comes from a + // constraint. + class GlobalRowsFromLocal + { + public: + GlobalRowsFromLocal () + : + n_active_rows (0), + n_inhomogeneous_rows (0) + {} + + void reinit (const size_type n_local_rows) + { + total_row_indices.resize(n_local_rows); + for (unsigned int i=0; i= n_inhomogeneous_rows, ExcInternalError()); + std::swap (total_row_indices[n_active_rows+i], + total_row_indices[n_active_rows+n_inhomogeneous_rows]); + n_inhomogeneous_rows++; + } + + // the local row where constraint number i was detected, to find that row + // easily when the GlobalRowsToLocal has been set up + size_type constraint_origin (size_type i) const + { + return total_row_indices[n_active_rows+i].local_row; + } + + // a vector that contains all the global ids and the corresponding local + // ids as well as a pointer to that data where we store how to resolve + // constraints. + std::vector total_row_indices; + + private: + // holds the actual data from the constraints + DataCache data_cache; + + // how many rows there are, constraints disregarded + size_type n_active_rows; + + // the number of rows with inhomogeneous constraints + size_type n_inhomogeneous_rows; + }; + + // a function that appends an additional row to the list of values, or + // appends a value to an already existing row. Similar functionality as for + // std::map, but here done for a + // std::vector, much faster for short lists as we have them + // here + inline + void + GlobalRowsFromLocal::insert_index (const size_type global_row, + const size_type local_row, + const double constraint_value) + { + typedef std::vector::iterator index_iterator; + index_iterator pos, pos1; + Distributing row_value (global_row); + std::pair constraint (local_row, constraint_value); + + // check whether the list was really sorted before entering here + for (size_type i=1; iglobal_row == global_row) + pos1 = pos; + else + { + pos1 = total_row_indices.insert(pos, row_value); + ++n_active_rows; + } + + if (pos1->constraint_position == numbers::invalid_size_type) + pos1->constraint_position = data_cache.insert_new_index (constraint); + else + data_cache.append_index (pos1->constraint_position, constraint); + } + + // this sort algorithm sorts std::vector, but does not take + // the constraints into account. this means that in case that constraints + // are already inserted, this function does not work as expected. Use + // shellsort, which is very fast in case the indices are already sorted + // (which is the usual case with DG elements), and not too slow in other + // cases + inline + void + GlobalRowsFromLocal::sort () + { + size_type i, j, j2, temp, templ, istep; + size_type step; + + // check whether the constraints are really empty. 
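+    // Editorial note: this is a shell sort with gap sequence
+    // length/2, length/4, ..., 1; for input that is already nearly sorted
+    // (the common case with DG elements) it needs close to O(N) comparisons.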
+ const size_type length = size(); + + // make sure that we are in the range of the vector + AssertIndexRange (length, total_row_indices.size()+1); + for (size_type i=0; i 0) + { + for (i=step; i < length; i++) + { + istep = step; + j = i; + j2 = j-istep; + temp = total_row_indices[i].global_row; + templ = total_row_indices[i].local_row; + if (total_row_indices[j2].global_row > temp) + { + while ((j >= istep) && (total_row_indices[j2].global_row > temp)) + { + total_row_indices[j].global_row = total_row_indices[j2].global_row; + total_row_indices[j].local_row = total_row_indices[j2].local_row; + j = j2; + j2 -= istep; + } + total_row_indices[j].global_row = temp; + total_row_indices[j].local_row = templ; + } + } + step = step>>1; + } + } + + + + /** + * Scratch data that is used during calls to distribute_local_to_global and + * add_entries_local_to_global. In order to avoid frequent memory + * allocation, we keep the data alive from one call to the next in a static + * variable. Since we want to allow for different number types in matrices, + * this is a template. + * + * Since each thread gets its private version of scratch data out of the + * ThreadLocalStorage, no conflicting access can occur. For this to be + * valid, we need to make sure that no call within + * distribute_local_to_global is made that by itself can spawn tasks. + * Otherwise, we might end up in a situation where several threads fight for + * the data. + * + * Access to the scratch data is only through the accessor class which + * handles the access as well as marking the data as used. + */ + template + class AffineConstraintsData + { + public: + struct ScratchData + { + /** + * Constructor, does nothing. + */ + ScratchData () + : + in_use (false) + {} + + /** + * Copy constructor, does nothing + */ + ScratchData (const ScratchData &) + : + in_use (false) + {} + + /** + * Stores whether the data is currently in use. + */ + bool in_use; + + /** + * Temporary array for column indices + */ + std::vector columns; + + /** + * Temporary array for column values + */ + std::vector values; + + /** + * Temporary array for block start indices + */ + std::vector block_starts; + + /** + * Temporary array for vector indices + */ + std::vector vector_indices; + + /** + * Temporary array for vector values + */ + std::vector vector_values; + + /** + * Data array for reorder row/column indices. + */ + GlobalRowsFromLocal global_rows; + + /** + * Data array for reorder row/column indices. + */ + GlobalRowsFromLocal global_columns; + }; + + /** + * Accessor class to guard access to scratch_data + */ + class ScratchDataAccessor + { + public: + /** + * Constructor. Grabs a scratch data object on the current thread and + * mark it as used + */ + ScratchDataAccessor() + : + my_scratch_data(&AffineConstraintsData::scratch_data.get()) + { + Assert(my_scratch_data->in_use == false, + ExcMessage("Access to thread-local scratch data tried, but it is already " + "in use")); + my_scratch_data->in_use = true; + } + + /** + * Destructor. Mark scratch data as available again. + */ + ~ScratchDataAccessor() + { + my_scratch_data->in_use = false; + } + + /** + * Dereferencing operator. + */ + ScratchData &operator* () + { + return *my_scratch_data; + } + + /** + * Dereferencing operator. + */ + ScratchData *operator-> () + { + return my_scratch_data; + } + + private: + ScratchData *my_scratch_data; + }; + + private: + /** + * The actual data object that contains a scratch data for each thread. 
+ */ + static Threads::ThreadLocalStorage scratch_data; + }; + + + + // function for block matrices: Find out where in the list of local dofs + // (sorted according to global ids) the individual blocks start. Transform + // the global indices to block-local indices in order to be able to use + // functions like vector.block(1)(block_local_id), instead of + // vector(global_id). This avoids transforming indices one-by-one later on. + template + inline + void + make_block_starts (const BlockType &block_object, + GlobalRowsFromLocal &global_rows, + std::vector &block_starts) + { + AssertDimension (block_starts.size(), block_object.n_block_rows()+1); + + typedef std::vector::iterator row_iterator; + row_iterator block_indices = global_rows.total_row_indices.begin(); + + const size_type num_blocks = block_object.n_block_rows(); + const size_type n_active_rows = global_rows.size(); + + // find end of rows. + block_starts[0] = 0; + for (size_type i=1; i instead of + // GlobalRowsFromLocal. Used in functions for sparsity patterns. + template + inline + void + make_block_starts (const BlockType &block_object, + std::vector &row_indices, + std::vector &block_starts) + { + AssertDimension (block_starts.size(), block_object.n_block_rows()+1); + + typedef std::vector::iterator row_iterator; + row_iterator col_indices = row_indices.begin(); + + const size_type num_blocks = block_object.n_block_rows(); + + // find end of rows. + block_starts[0] = 0; + for (size_type i=1; i + static inline + LocalType resolve_matrix_entry (const GlobalRowsFromLocal &global_rows, + const GlobalRowsFromLocal &global_cols, + const size_type i, + const size_type j, + const size_type loc_row, + const FullMatrix &local_matrix) + { + const size_type loc_col = global_cols.local_row(j); + LocalType col_val; + + // case 1: row has direct contribution in local matrix. decide whether col + // has a direct contribution. if not, set the value to zero. + if (loc_row != numbers::invalid_size_type) + { + col_val = ((loc_col != numbers::invalid_size_type) ? + local_matrix(loc_row, loc_col) : 0); + + // account for indirect contributions by constraints in column + for (size_type p=0; p + inline + void + resolve_matrix_row (const GlobalRowsFromLocal &global_rows, + const GlobalRowsFromLocal &global_cols, + const size_type i, + const size_type column_start, + const size_type column_end, + const FullMatrix &local_matrix, + size_type *&col_ptr, + number *&val_ptr) + { + if (column_end == column_start) + return; + + AssertIndexRange (column_end-1, global_cols.size()); + const size_type loc_row = global_rows.local_row(i); + + // fast function if there are no indirect references to any of the local + // rows at all on this set of dofs (saves a lot of checks). the only check + // we actually need to perform is whether the matrix element is zero. + if (global_rows.have_indirect_rows() == false && + global_cols.have_indirect_rows() == false) + { + AssertIndexRange(loc_row, local_matrix.m()); + const LocalType *matrix_ptr = &local_matrix(loc_row, 0); + + for (size_type j=column_start; j (col_val); + *col_ptr++ = global_cols.global_row(j); + } + } + } + + // more difficult part when there are indirect references and when we need + // to do some more checks. + else + { + for (size_type j=column_start; j (col_val); + *col_ptr++ = global_cols.global_row(j); + } + } + } + } + + + + // specialized function that can write into the row of a + // SparseMatrix. 
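+  // Editorial note: add_value() below advances the row iterator
+  // monotonically, which is valid because resolve_matrix_row() hands over
+  // the column indices in ascending order; the iterator never needs to be
+  // reset within a row.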
+ namespace dealiiSparseMatrix + { + template + static inline + void add_value (const LocalType value, + const size_type row, + const size_type column, + SparseMatrixIterator &matrix_values) + { + (void)row; + if (value != LocalType ()) + { + while (matrix_values->column() < column) + ++matrix_values; + Assert (matrix_values->column() == column, + typename SparseMatrix::ExcInvalidIndex(row, column)); + matrix_values->value() += value; + } + } + } + + + // similar as before, now with shortcut for deal.II sparse matrices. this + // lets us avoid using extra arrays, and does all the operations just in + // place, i.e., in the respective matrix row + template + inline + void + resolve_matrix_row (const GlobalRowsFromLocal &global_rows, + const size_type i, + const size_type column_start, + const size_type column_end, + const FullMatrix &local_matrix, + SparseMatrix *sparse_matrix) + { + if (column_end == column_start) + return; + + AssertIndexRange (column_end-1, global_rows.size()); + const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern(); + + if (sparsity.n_nonzero_elements() == 0) + return; + + const size_type row = global_rows.global_row(i); + const size_type loc_row = global_rows.local_row(i); + + typename SparseMatrix::iterator + matrix_values = sparse_matrix->begin(row); + const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols(); + + // distinguish three cases about what can happen for checking whether the + // diagonal is the first element of the row. this avoids if statements at + // the innermost loop positions + + if (!optimize_diagonal) // case 1: no diagonal optimization in matrix + { + if (global_rows.have_indirect_rows() == false) + { + AssertIndexRange (loc_row, local_matrix.m()); + const LocalType *matrix_ptr = &local_matrix(loc_row, 0); + + for (size_type j=column_start; j=column_start && ibegin(row)->value() += matrix_ptr[loc_row]; + for (size_type j=column_start; jbegin(row)->value() += + resolve_matrix_entry (global_rows, global_rows, i, i, + loc_row, local_matrix); + for (size_type j=column_start; jbegin(row)->value() += col_val; + else + dealiiSparseMatrix::add_value(col_val, row, + global_rows.global_row(j), + matrix_values); + } + } + else + { + ++matrix_values; // jump over diagonal element + for (size_type j=column_start; jbegin(row)->value() += col_val; + else + dealiiSparseMatrix::add_value (col_val, row, + global_rows.global_row(j), + matrix_values); + } + } + } + + + + // Same function to resolve all entries that will be added to the given + // global row global_rows[i] as before, now for sparsity pattern + inline + void + resolve_matrix_row (const GlobalRowsFromLocal &global_rows, + const size_type i, + const size_type column_start, + const size_type column_end, + const Table<2,bool> &dof_mask, + std::vector::iterator &col_ptr) + { + if (column_end == column_start) + return; + + const size_type loc_row = global_rows.local_row(i); + + // fast function if there are no indirect references to any of the local + // rows at all on this set of dofs + if (global_rows.have_indirect_rows() == false) + { + Assert(loc_row < dof_mask.n_rows(), + ExcInternalError()); + + for (size_type j=column_start; j + inline void + set_matrix_diagonals (const internals::GlobalRowsFromLocal &global_rows, + const std::vector &local_dof_indices, + const FullMatrix &local_matrix, + const AffineConstraints &constraints, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs) + { + if (global_rows.n_constraints() > 0) + { + 
typename MatrixType::value_type average_diagonal = typename MatrixType::value_type(); + for (size_type i=0; i(local_matrix.m()); + + for (size_type i=0; i + inline void + set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows, + const std::vector &local_dof_indices, + const Table<2,bool> &dof_mask, + const bool keep_constrained_entries, + SparsityPatternType &sparsity_pattern) + { + // if we got constraints, need to add the diagonal element and, if the + // user requested so, also the rest of the entries in rows and columns + // that have been left out above + if (global_rows.n_constraints() > 0) + { + for (size_type i=0; i &local_dof_indices, + internals::GlobalRowsFromLocal &global_rows) const +{ + const size_type n_local_dofs = local_dof_indices.size(); + AssertDimension (n_local_dofs, global_rows.size()); + + // when distributing the local data to the global matrix, we can quite + // cheaply sort the indices (obviously, this introduces the need for + // allocating some memory on the way, but we need to do this only for rows, + // whereas the distribution process itself goes over rows and columns). This + // has the advantage that when writing into the global matrix, we can make + // use of the sortedness. + + // so the first step is to create a sorted list of all row values that are + // possible. these values are either the rows from unconstrained dofs, or + // some indices introduced by dofs constrained to a combination of some + // other dofs. regarding the data type, choose a std::vector of a + // pair of unsigned ints (for global columns) and internal data (containing + // local columns + possible jumps from constraints). Choosing + // std::map or anything else M.K. knows of would be much more + // expensive here! + + // cache whether we have to resolve any indirect rows generated from + // resolving constrained dofs. + size_type added_rows = 0; + + // first add the indices in an unsorted way and only keep track of the + // constraints that appear. They are resolved in a second step. + for (size_type i = 0; i &local_dof_indices, + std::vector &active_dofs) const +{ + const size_type n_local_dofs = local_dof_indices.size(); + size_type added_rows = 0; + for (size_type i = 0; i0; --i) + { + const size_type local_row = active_dofs.back(); + + // remove constrained entry since we are going to resolve it in place + active_dofs.pop_back(); + const size_type global_row = local_dof_indices[local_row]; + const ConstraintLine &position = + lines[lines_cache[calculate_line_index(global_row)]]; + for (size_type q=0; q::iterator it = + Utilities::lower_bound(active_dofs.begin(), + active_dofs.end()-i+1, + new_index); + if (*it != new_index) + active_dofs.insert(it, new_index); + } + } + } +} + + + +// Resolve the constraints from the vector and apply inhomogeneities. +template +inline +typename ProductType::type +AffineConstraints:: +resolve_vector_entry (const size_type i, + const internals::GlobalRowsFromLocal &global_rows, + const Vector &local_vector, + const std::vector &local_dof_indices, + const FullMatrix &local_matrix) const +{ + const size_type loc_row = global_rows.local_row(i); + const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities(); + typename ProductType::type val = 0; + // has a direct contribution from some local entry. If we have inhomogeneous + // constraints, compute the contribution of the inhomogeneity in the current + // row. 
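+  // Schematically (editorial summary): for a direct local row r,
+  //   val = b_local(r) - sum_q g_q * A_local(r, q),
+  // where q runs over the local rows with inhomogeneous constraints and g_q
+  // is the corresponding inhomogeneity; contributions entering indirectly
+  // through constraint weights are accumulated in the loop further below.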
+ if (loc_row != numbers::invalid_size_type) + { + val = local_vector(loc_row); + for (size_type i=0; i::type add_this = local_vector (loc_row_q); + for (size_type k=0; k +void +AffineConstraints::distribute_local_to_global ( + const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs, + std::integral_constant) const +{ + // check whether we work on real vectors or we just used a dummy when + // calling the other function above. + const bool use_vectors = (local_vector.size() == 0 && + global_vector.size() == 0) ? false : true; + typedef typename MatrixType::value_type number; + const bool use_dealii_matrix = + std::is_same >::value; + + AssertDimension (local_matrix.n(), local_dof_indices.size()); + AssertDimension (local_matrix.m(), local_dof_indices.size()); + Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); + if (use_vectors == true) + { + AssertDimension (local_matrix.m(), local_vector.size()); + AssertDimension (global_matrix.m(), global_vector.size()); + } + Assert (lines.empty() || sorted == true, ExcMatrixNotClosed()); + + const size_type n_local_dofs = local_dof_indices.size(); + + typename internals::AffineConstraintsData::ScratchDataAccessor + scratch_data; + + internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; + global_rows.reinit(n_local_dofs); + make_sorted_row_list (local_dof_indices, global_rows); + + const size_type n_actual_dofs = global_rows.size(); + + // create arrays for the column data (indices and values) that will then be + // written into the matrix. Shortcut for deal.II sparse matrix. We can use + // the scratch data if we have a double matrix. Otherwise, we need to create + // an array in any case since we cannot know about the actual data type in + // the AffineConstraints class (unless we do cast). This involves a little + // bit of logic to determine the type of the matrix value. + std::vector &cols = scratch_data->columns; + std::vector &vals = scratch_data->values; + // create arrays for writing into the vector as well + std::vector &vector_indices = scratch_data->vector_indices; + std::vector &vector_values = scratch_data->vector_values; + vector_indices.resize(n_actual_dofs); + vector_values.resize(n_actual_dofs); + SparseMatrix *sparse_matrix + = dynamic_cast *>(&global_matrix); + if (use_dealii_matrix == false) + { + cols.resize (n_actual_dofs); + vals.resize (n_actual_dofs); + } + else + Assert (sparse_matrix != nullptr, ExcInternalError()); + + // now do the actual job. go through all the global rows that we will touch + // and call resolve_matrix_row for each of those. + size_type local_row_n = 0; + for (size_type i=0; i 0) + global_matrix.add(row, n_values, &cols[0], &vals[0], false, + true); + } + else + internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs, + local_matrix, sparse_matrix); + + // now to the vectors. besides doing the same job as we did above (i.e., + // distribute the content of the local vector into the global one), need + // to account for inhomogeneities here: this corresponds to eliminating + // the respective column in the local matrix with value on the right + // hand side. 
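+      // The resolved values are buffered in vector_indices/vector_values
+      // and written back in one bulk operation after this loop; for
+      // Trilinos or PETSc vectors, element-wise write access would be
+      // comparatively expensive.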
+ if (use_vectors == true) + { + const typename VectorType::value_type + val = resolve_vector_entry (i, global_rows, + local_vector, + local_dof_indices, + local_matrix); + AssertIsFinite(val); + + if (val != typename VectorType::value_type ()) + { + vector_indices[local_row_n] = row; + vector_values[local_row_n] = val; + ++local_row_n; + } + } + } + // Drop the elements of vector_indices and vector_values that we do not use (we may + // always elide writing zero values to vectors) + const size_type n_local_rows = local_row_n; + vector_indices.resize(n_local_rows); + vector_values.resize(n_local_rows); + + // While the standard case is that these types are equal, they need not be, so + // only do a bulk update if they are. Note that the types in the arguments to + // add must be equal if we have a Trilinos or PETSc vector but do not have to + // be if we have a deal.II native vector: one could further optimize this for + // Vector, LinearAlgebra::distributed::vector, etc. + if (std::is_same::value) + { + global_vector.add(vector_indices, + *reinterpret_cast *>(&vector_values)); + } + else + { + for (size_type row_n=0; row_n(vector_values[row_n]); + } + } + + internals::set_matrix_diagonals (global_rows, local_dof_indices, + local_matrix, *this, + global_matrix, global_vector, use_inhomogeneities_for_rhs); +} + + + +// similar function as above, but now specialized for block matrices. See the +// other function for additional comments. +template +void +AffineConstraints:: +distribute_local_to_global ( + const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs, + std::integral_constant) const +{ + const bool use_vectors = (local_vector.size() == 0 && + global_vector.size() == 0) ? 
false : true; + typedef typename MatrixType::value_type number; + const bool use_dealii_matrix = + std::is_same >::value; + + AssertDimension (local_matrix.n(), local_dof_indices.size()); + AssertDimension (local_matrix.m(), local_dof_indices.size()); + Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); + Assert (global_matrix.n_block_rows() == global_matrix.n_block_cols(), + ExcNotQuadratic()); + if (use_vectors == true) + { + AssertDimension (local_matrix.m(), local_vector.size()); + AssertDimension (global_matrix.m(), global_vector.size()); + } + Assert (sorted == true, ExcMatrixNotClosed()); + + typename internals::AffineConstraintsData::ScratchDataAccessor + scratch_data; + + const size_type n_local_dofs = local_dof_indices.size(); + internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; + global_rows.reinit(n_local_dofs); + + make_sorted_row_list (local_dof_indices, global_rows); + const size_type n_actual_dofs = global_rows.size(); + + std::vector &global_indices = scratch_data->vector_indices; + if (use_vectors == true) + { + global_indices.resize(n_actual_dofs); + for (size_type i=0; i &block_starts = scratch_data->block_starts; + block_starts.resize(num_blocks+1); + internals::make_block_starts (global_matrix, global_rows, block_starts); + + std::vector &cols = scratch_data->columns; + std::vector &vals = scratch_data->values; + if (use_dealii_matrix == false) + { + cols.resize (n_actual_dofs); + vals.resize (n_actual_dofs); + } + + // the basic difference to the non-block variant from now onwards is that we + // go through the blocks of the matrix separately, which allows us to set + // the block entries individually + for (size_type block=0; block 0) + global_matrix.block(block, block_col).add(row, n_values, + &cols[0], &vals[0], + false, true); + } + else + { + SparseMatrix *sparse_matrix + = dynamic_cast *>(&global_matrix.block(block, + block_col)); + Assert (sparse_matrix != nullptr, ExcInternalError()); + internals::resolve_matrix_row (global_rows, i, start_block, + end_block, local_matrix, sparse_matrix); + } + } + + if (use_vectors == true) + { + const number val = resolve_vector_entry (i, global_rows, + local_vector, + local_dof_indices, + local_matrix); + + if (val != number ()) + global_vector(global_indices[i]) += + static_cast(val); + } + } + } + + internals::set_matrix_diagonals (global_rows, local_dof_indices, + local_matrix, *this, + global_matrix, global_vector, use_inhomogeneities_for_rhs); +} + + + +template +void +AffineConstraints::distribute_local_to_global ( + const FullMatrix &local_matrix, + const std::vector &row_indices, + const std::vector &col_indices, + MatrixType &global_matrix) const +{ + distribute_local_to_global(local_matrix, row_indices, *this, + col_indices, global_matrix); +} + + + +template +void +AffineConstraints::distribute_local_to_global ( + const FullMatrix &local_matrix, + const std::vector &row_indices, + const AffineConstraints &col_constraint_matrix, + const std::vector &col_indices, + MatrixType &global_matrix) const +{ + typedef typename MatrixType::value_type number; + + AssertDimension (local_matrix.m(), row_indices.size()); + AssertDimension (local_matrix.n(), col_indices.size()); + + const size_type n_local_row_dofs = row_indices.size(); + const size_type n_local_col_dofs = col_indices.size(); + + typename internals::AffineConstraintsData::ScratchDataAccessor + scratch_data; + internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; + global_rows.reinit(n_local_row_dofs); + 
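+  // For a rectangular matrix, rows and columns may be subject to two
+  // different AffineConstraints objects, so we build two independent
+  // sorted index lists: one for the rows, one for the columns.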
internals::GlobalRowsFromLocal &global_cols = scratch_data->global_columns; + global_cols.reinit(n_local_col_dofs); + make_sorted_row_list (row_indices, global_rows); + col_constraint_matrix.make_sorted_row_list (col_indices, global_cols); + + const size_type n_actual_row_dofs = global_rows.size(); + const size_type n_actual_col_dofs = global_cols.size(); + + // create arrays for the column data (indices and values) that will then be + // written into the matrix. Shortcut for deal.II sparse matrix + std::vector &cols = scratch_data->columns; + std::vector &vals = scratch_data->values; + cols.resize(n_actual_col_dofs); + vals.resize(n_actual_col_dofs); + + // now do the actual job. + for (size_type i=0; i 0) + global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); + } +} + + + +template +void +AffineConstraints:: +add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + std::integral_constant ) const +{ + Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic()); + + const size_type n_local_dofs = local_dof_indices.size(); + bool dof_mask_is_active = false; + if (dof_mask.n_rows() == n_local_dofs) + { + dof_mask_is_active = true; + AssertDimension (dof_mask.n_cols(), n_local_dofs); + } + + internals::AffineConstraintsData::ScratchDataAccessor scratch_data; + + // if the dof mask is not active, all we have to do is to add some indices + // in a matrix format. To do this, we first create an array of all the + // indices that are to be added. these indices are the local dof indices + // plus some indices that come from constraints. + if (dof_mask_is_active == false) + { + std::vector &actual_dof_indices = scratch_data->columns; + actual_dof_indices.resize(n_local_dofs); + make_sorted_row_list (local_dof_indices, actual_dof_indices); + const size_type n_actual_dofs = actual_dof_indices.size(); + + // now add the indices we collected above to the sparsity pattern. Very + // easy here - just add the same array to all the rows... + for (size_type i=0; iglobal_rows; + global_rows.reinit(n_local_dofs); + make_sorted_row_list (local_dof_indices, global_rows); + const size_type n_actual_dofs = global_rows.size(); + + // create arrays for the column indices that will then be written into the + // sparsity pattern. 
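+  // (Only column indices are needed here; in contrast to the matrix case
+  // there are no values to accumulate for a sparsity pattern.)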
+ std::vector &cols = scratch_data->columns; + cols.resize(n_actual_dofs); + + for (size_type i=0; i::iterator col_ptr = cols.begin(); + const size_type row = global_rows.global_row(i); + internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs, + dof_mask, col_ptr); + + // finally, write all the information that accumulated under the given + // process into the global matrix row and into the vector + if (col_ptr != cols.begin()) + sparsity_pattern.add_entries(row, cols.begin(), col_ptr, + true); + } + internals::set_sparsity_diagonals (global_rows, local_dof_indices, + dof_mask, keep_constrained_entries, + sparsity_pattern); +} + + + + +template +void +AffineConstraints:: +add_entries_local_to_global (const std::vector &row_indices, + const std::vector &col_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask) const +{ + const size_type n_local_rows = row_indices.size(); + const size_type n_local_cols = col_indices.size(); + bool dof_mask_is_active = false; + if (dof_mask.n_rows() == n_local_rows && dof_mask.n_cols() == n_local_cols) + dof_mask_is_active = true; + + // if constrained entries should be kept, need to add rows and columns of + // those to the sparsity pattern + if (keep_constrained_entries == true) + { + for (size_type i=0; i actual_row_indices (n_local_rows); + std::vector actual_col_indices (n_local_cols); + make_sorted_row_list (row_indices, actual_row_indices); + make_sorted_row_list (col_indices, actual_col_indices); + const size_type n_actual_rows = actual_row_indices.size(); + + // now add the indices we collected above to the sparsity pattern. Very + // easy here - just add the same array to all the rows... + for (size_type i=0; i +void +AffineConstraints:: +add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityPatternType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + std::integral_constant ) const +{ + // just as the other add_entries_local_to_global function, but now + // specialized for block matrices. + Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic()); + Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(), + ExcNotQuadratic()); + + const size_type n_local_dofs = local_dof_indices.size(); + const size_type num_blocks = sparsity_pattern.n_block_rows(); + + internals::AffineConstraintsData::ScratchDataAccessor scratch_data; + + bool dof_mask_is_active = false; + if (dof_mask.n_rows() == n_local_dofs) + { + dof_mask_is_active = true; + AssertDimension (dof_mask.n_cols(), n_local_dofs); + } + + if (dof_mask_is_active == false) + { + std::vector &actual_dof_indices = scratch_data->columns; + actual_dof_indices.resize(n_local_dofs); + make_sorted_row_list (local_dof_indices, actual_dof_indices); + const size_type n_actual_dofs = actual_dof_indices.size(); + (void)n_actual_dofs; + + // additional construct that also takes care of block indices. + std::vector &block_starts = scratch_data->block_starts; + block_starts.resize(num_blocks+1); + internals::make_block_starts (sparsity_pattern, actual_dof_indices, + block_starts); + + for (size_type block=0; block::iterator index_it = actual_dof_indices.begin(); + for (size_type block_col = 0; block_colglobal_rows; + global_rows.reinit(n_local_dofs); + make_sorted_row_list (local_dof_indices, global_rows); + const size_type n_actual_dofs = global_rows.size(); + + // additional construct that also takes care of block indices. 
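+      // block_starts[b] stores the position within the sorted global index
+      // list at which block b begins, so that each block row/column range
+      // can be handled by the same resolve_matrix_row() kernel as above.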
+ std::vector &block_starts = scratch_data->block_starts; + block_starts.resize(num_blocks+1); + internals::make_block_starts(sparsity_pattern, global_rows, block_starts); + + std::vector &cols = scratch_data->columns; + cols.resize(n_actual_dofs); + + // the basic difference to the non-block variant from now onwards is that we + // go through the blocks of the matrix separately. + for (size_type block=0; block::iterator col_ptr = cols.begin(); + internals::resolve_matrix_row (global_rows, i, begin_block, + end_block, dof_mask, col_ptr); + + sparsity_pattern.block(block, block_col).add_entries(row, + cols.begin(), + col_ptr, + true); + } + } + } + + internals::set_sparsity_diagonals (global_rows, local_dof_indices, + dof_mask, keep_constrained_entries, + sparsity_pattern); +} + + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt index 15a7e8b96a..c953d5f7b4 100644 --- a/source/lac/CMakeLists.txt +++ b/source/lac/CMakeLists.txt @@ -16,6 +16,7 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) SET(_unity_include_src + affine_constraints.cc block_matrix_array.cc block_sparse_matrix.cc block_sparse_matrix_ez.cc @@ -62,6 +63,7 @@ SET(_separate_src ) SET(_inst + affine_constraints.inst.in block_sparse_matrix.inst.in block_vector.inst.in chunk_sparse_matrix.inst.in diff --git a/source/lac/affine_constraints.cc b/source/lac/affine_constraints.cc new file mode 100644 index 0000000000..dc9f3a4cb9 --- /dev/null +++ b/source/lac/affine_constraints.cc @@ -0,0 +1,1582 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 1998 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEAL_II_NAMESPACE_OPEN + + + +// Static member variable +const Table<2,bool> AffineConstraints::default_empty_table = Table<2,bool>(); + + + +void +AffineConstraints::copy_from (const AffineConstraints &other) +{ + lines = other.lines; + lines_cache = other.lines_cache; + local_lines = other.local_lines; + sorted = other.sorted; +} + + + +bool +AffineConstraints::check_zero_weight (const std::pair &p) +{ + return (p.second == 0); +} + + + +bool +AffineConstraints::ConstraintLine::operator < (const ConstraintLine &a) const +{ + return index < a.index; +} + + + +bool +AffineConstraints::ConstraintLine::operator == (const ConstraintLine &a) const +{ + return index == a.index; +} + + + +std::size_t +AffineConstraints::ConstraintLine::memory_consumption () const +{ + return (MemoryConsumption::memory_consumption (index) + + MemoryConsumption::memory_consumption (entries) + + MemoryConsumption::memory_consumption (inhomogeneity)); +} + + + +const AffineConstraints::LineRange AffineConstraints::get_lines() const +{ + return boost::make_iterator_range(lines.begin(), lines.end()); +} + + + +bool AffineConstraints::is_consistent_in_parallel(const std::vector &locally_owned_dofs, + const IndexSet &locally_active_dofs, + const MPI_Comm mpi_communicator, + const bool verbose) const +{ + ConstraintLine empty; + empty.inhomogeneity = 0.0; + + // Helper to return a reference to the ConstraintLine object that belongs to row @p row. + // We don't want to make copies but to return a reference, we need an empty object that + // we store above. + auto get_line = [&] (const size_type row) -> const ConstraintLine& + { + const size_type line_index = calculate_line_index(row); + if (line_index >= lines_cache.size() || + lines_cache[line_index] == numbers::invalid_size_type) + { + empty.index = row; + return empty; + } + else + return lines[lines_cache[line_index]]; + }; + + // identify non-owned rows and send to owner: + std::map< unsigned int, std::vector > to_send; + + const unsigned int myid = dealii::Utilities::MPI::this_mpi_process(mpi_communicator); + const unsigned int nproc = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator); + + // We will send all locally active dofs that are not locally owned for checking. Note + // that we allow constraints to differ on locally_relevant (and not active) DoFs. 
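+  // In other words, each process sends the constraint lines it stores for
+  // rows owned by another process to that owner, which then compares them
+  // entry by entry against its own version of those lines.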
+ IndexSet non_owned = locally_active_dofs; + non_owned.subtract_set(locally_owned_dofs[myid]); + for (unsigned int owner=0; owner > received = Utilities::MPI::some_to_some (mpi_communicator, to_send); + + unsigned int inconsistent = 0; + + // from each processor: + for (const auto &kv : received) + { + // for each incoming line: + for (auto &lineit : kv.second) + { + const ConstraintLine &reference = get_line(lineit.index); + + if (lineit.inhomogeneity != reference.inhomogeneity) + { + ++inconsistent; + + if (verbose) + std::cout << "Proc " << myid + << " got line " << lineit.index + << " from " << kv.first + << " inhomogeneity " << lineit.inhomogeneity << " != " << reference.inhomogeneity << std::endl; + } + else if (lineit.entries != reference.entries) + { + ++inconsistent; + if (verbose) + std::cout << "Proc " << myid + << " got line " << lineit.index + << " from " << kv.first + << " wrong values!" + << std::endl; + } + } + } + + const unsigned int total = Utilities::MPI::sum(inconsistent, mpi_communicator); + if (verbose && total>0 && myid==0) + std::cout << total << " inconsistent lines discovered!" << std::endl; + return total==0; +} + + + +void +AffineConstraints::add_lines (const std::set &lines) +{ + for (std::set::const_iterator + i = lines.begin(); i != lines.end(); ++i) + add_line (*i); +} + + + +void +AffineConstraints::add_lines (const std::vector &lines) +{ + for (size_type i=0; i > &col_val_pairs) +{ + Assert (sorted==false, ExcMatrixIsClosed()); + Assert (is_constrained(line), ExcLineInexistant(line)); + + ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]]; + Assert (line_ptr->index == line, ExcInternalError()); + + // if in debug mode, check whether an entry for this column already + // exists and if its the same as the one entered at present + // + // in any case: skip this entry if an entry for this column already + // exists, since we don't want to enter it twice + for (std::vector >::const_iterator + col_val_pair = col_val_pairs.begin(); + col_val_pair!=col_val_pairs.end(); ++col_val_pair) + { + Assert (line != col_val_pair->first, + ExcMessage ("Can't constrain a degree of freedom to itself")); + + for (ConstraintLine::Entries::const_iterator + p=line_ptr->entries.begin(); + p != line_ptr->entries.end(); ++p) + if (p->first == col_val_pair->first) + { + // entry exists, break innermost loop + Assert (p->second == col_val_pair->second, + ExcEntryAlreadyExists(line, col_val_pair->first, + p->second, col_val_pair->second)); + break; + } + + line_ptr->entries.push_back (*col_val_pair); + } +} + + + +void AffineConstraints::add_selected_constraints( + const AffineConstraints &constraints, + const IndexSet &filter) +{ + if (constraints.n_constraints() == 0) + return; + + Assert (filter.size() > constraints.lines.back().index, + ExcMessage ("Filter needs to be larger than constraint matrix size.")); + for (std::vector::const_iterator line=constraints.lines.begin(); + line!=constraints.lines.end(); ++line) + if (filter.is_element(line->index)) + { + const size_type row = filter.index_within_set (line->index); + add_line (row); + set_inhomogeneity (row, line->inhomogeneity); + for (size_type i=0; ientries.size(); ++i) + if (filter.is_element(line->entries[i].first)) + add_entry (row, filter.index_within_set (line->entries[i].first), + line->entries[i].second); + } +} + + + +void AffineConstraints::close () +{ + if (sorted == true) + return; + + // sort the lines + std::sort (lines.begin(), lines.end()); + + // update list of pointers and give the 
vector a sharp size since we + // won't modify the size any more after this point. + { + std::vector new_lines (lines_cache.size(), + numbers::invalid_size_type); + size_type counter = 0; + for (std::vector::const_iterator line=lines.begin(); + line!=lines.end(); ++line, ++counter) + new_lines[calculate_line_index(line->index)] = counter; + std::swap (lines_cache, new_lines); + } + + // in debug mode: check whether we really set the pointers correctly. + for (size_type i=0; i::iterator line = lines.begin(); + line!=lines.end(); ++line) + // first remove zero entries. that would mean that in the linear + // constraint for a node, x_i = ax_1 + bx_2 + ..., another node times 0 + // appears. obviously, 0*something can be omitted + line->entries.erase (std::remove_if (line->entries.begin(), + line->entries.end(), + &check_zero_weight), + line->entries.end()); + + + +#ifdef DEBUG + // In debug mode we are computing an estimate for the maximum number + // of constraints so that we can bail out if there is a cycle in the + // constraints (which is easier than searching for cycles in the graph). + // + // Let us figure out the largest dof index. This is an upper bound for the + // number of constraints because it is an approximation for the number of dofs + // in our system. + size_type largest_idx = 0; + for (std::vector::iterator line = lines.begin(); + line!=lines.end(); ++line) + { + for (ConstraintLine::Entries::iterator it = line->entries.begin(); it!=line->entries.end(); ++it) + { + largest_idx=std::max(largest_idx, it->first); + } + } +#endif + + // replace references to dofs that are themselves constrained. note that + // because we may replace references to other dofs that may themselves be + // constrained to third ones, we have to iterate over all this until we + // replace no chains of constraints any more + // + // the iteration replaces references to constrained degrees of freedom by + // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2, + // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appear + // twice. we will throw this duplicate out in the following step, where + // we sort the list so that throwing out duplicates becomes much more + // efficient. also, we have to do it only once, rather than in each + // iteration + size_type iteration = 0; + while (true) + { + bool chained_constraint_replaced = false; + + for (std::vector::iterator line = lines.begin(); + line!=lines.end(); ++line) + { +#ifdef DEBUG + // we need to keep track of how many replacements we do in this line, because we can + // end up in a cycle A->B->C->A without the number of entries growing. + size_type n_replacements = 0; +#endif + + // loop over all entries of this line (including ones that we + // have appended in this go around) and see whether they are + // further constrained. 
ignore elements that we don't store on + // the current processor + size_type entry = 0; + while (entry < line->entries.size()) + if (((local_lines.size() == 0) + || + (local_lines.is_element(line->entries[entry].first))) + && + is_constrained (line->entries[entry].first)) + { + // ok, this entry is further constrained: + chained_constraint_replaced = true; + + // look up the chain of constraints for this entry + const size_type dof_index = line->entries[entry].first; + const double weight = line->entries[entry].second; + + Assert (dof_index != line->index, + ExcMessage ("Cycle in constraints detected!")); + + const ConstraintLine *constrained_line = + &lines[lines_cache[calculate_line_index(dof_index)]]; + Assert (constrained_line->index == dof_index, + ExcInternalError()); + + // now we have to replace an entry by its expansion. we do + // that by overwriting the entry by the first entry of the + // expansion and adding the remaining ones to the end, + // where we will later process them once more + // + // we can of course only do that if the DoF that we are + // currently handle is constrained by a linear combination + // of other dofs: + if (constrained_line->entries.size() > 0) + { + for (size_type i=0; ientries.size(); ++i) + Assert (dof_index != constrained_line->entries[i].first, + ExcMessage ("Cycle in constraints detected!")); + + // replace first entry, then tack the rest to the end + // of the list + line->entries[entry] = + std::make_pair (constrained_line->entries[0].first, + constrained_line->entries[0].second * + weight); + + for (size_type i=1; ientries.size(); ++i) + line->entries.emplace_back (constrained_line->entries[i].first, + constrained_line->entries[i].second + * weight); + +#ifdef DEBUG + // keep track of how many entries we replace in this + // line. If we do more than there are constraints or + // dofs in our system, we must have a cycle. + ++n_replacements; + Assert(n_replacements/2=largest_idx) + return; // this enables us to test for this Exception. +#endif + } + else + // the DoF that we encountered is not constrained by a + // linear combination of other dofs but is equal to just + // the inhomogeneity (i.e. its chain of entries is + // empty). in that case, we can't just overwrite the + // current entry, but we have to actually eliminate it + { + line->entries.erase (line->entries.begin()+entry); + } + + line->inhomogeneity += constrained_line->inhomogeneity * + weight; + + // now that we're here, do not increase index by one but + // rather make another pass for the present entry because + // we have replaced the present entry by another one, or + // because we have deleted it and shifted all following + // ones one forward + } + else + // entry not further constrained. just move ahead by one + ++entry; + } + + // if we didn't do anything in this round, then quit the loop + if (chained_constraint_replaced == false) + break; + + // increase iteration count. note that we should not iterate more + // times than there are constraints, since this puts a natural upper + // bound on the length of constraint chains + ++iteration; + Assert (iteration <= lines.size(), ExcInternalError()); + } + + // finally sort the entries and re-scale them if necessary. in this step, + // we also throw out duplicates as mentioned above. moreover, as some + // entries might have had zero weights, we replace them by a vector with + // sharp sizes. 
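+  // For example, after the chain resolution above a line may read
+  //   x3 = 1/2*x0 + 1/4*x0 + 1/4*x1:
+  // sorting brings the two x0 entries next to each other, and the loop
+  // below merges them into x3 = 3/4*x0 + 1/4*x1.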
+  for (std::vector<ConstraintLine>::iterator line = lines.begin();
+       line!=lines.end(); ++line)
+    {
+      std::sort (line->entries.begin(), line->entries.end());
+
+      // loop over the now sorted list and see whether any of the entries
+      // references the same dofs more than once in order to find how many
+      // non-duplicate entries we have. This lets us allocate the correct
+      // amount of memory for the constraint entries.
+      size_type duplicates = 0;
+      for (size_type i=1; i<line->entries.size(); ++i)
+        if (line->entries[i].first == line->entries[i-1].first)
+          duplicates++;
+
+      if (duplicates > 0 || line->entries.size() < line->entries.capacity())
+        {
+          ConstraintLine::Entries new_entries;
+
+          // if we have no duplicates, copy the entries verbatim. this way,
+          // the final size of the vector is correct.
+          if (duplicates == 0)
+            new_entries = line->entries;
+          else
+            {
+              // otherwise, we need to go through the list by hand and
+              // resolve the duplicates
+              new_entries.reserve (line->entries.size() - duplicates);
+              new_entries.push_back(line->entries[0]);
+              for (size_type j=1; j<line->entries.size(); ++j)
+                if (line->entries[j].first == line->entries[j-1].first)
+                  {
+                    Assert (new_entries.back().first == line->entries[j].first,
+                            ExcInternalError());
+                    new_entries.back().second += line->entries[j].second;
+                  }
+                else
+                  new_entries.push_back (line->entries[j]);
+
+              Assert (new_entries.size() == line->entries.size() - duplicates,
+                      ExcInternalError());
+
+              // make sure there are really no duplicates left and that the
+              // list is still sorted
+              for (size_type j=1; j<new_entries.size(); ++j)
+                Assert (new_entries[j].first > new_entries[j-1].first,
+                        ExcInternalError());
+            }
+
+          // replace the old list of constraints for this dof by the new one
+          line->entries.swap (new_entries);
+        }
+
+      // finally do the following check: if the sum of weights for the
+      // constraints is close to one, but not exactly one, then rescale all
+      // the weights so that they sum up to 1. this adds a little numerical
+      // stability and avoids all sorts of problems where the actual value
+      // is close to, but not quite, what we expected
+      //
+      // the case where the weights don't quite sum up happens when we
+      // compute the interpolation weights "on the fly", i.e., not from
+      // precomputed tables. in this case, the interpolation weights are
+      // also subject to round-off
+      double sum = 0;
+      for (size_type i=0; i<line->entries.size(); ++i)
+        sum += line->entries[i].second;
+      if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
+        {
+          for (size_type i=0; i<line->entries.size(); ++i)
+            line->entries[i].second /= sum;
+          line->inhomogeneity /= sum;
+        }
+    } // end of loop over all constraint lines
+
+#ifdef DEBUG
+  // if in debug mode: check that no dof is constrained to another dof that
+  // is also constrained.
exclude dofs from this check whose constraint + // lines are not stored on the local processor + for (std::vector::const_iterator line=lines.begin(); + line!=lines.end(); ++line) + for (ConstraintLine::Entries::const_iterator + entry=line->entries.begin(); + entry!=line->entries.end(); ++entry) + if ((local_lines.size() == 0) + || + (local_lines.is_element(entry->first))) + { + // make sure that entry->first is not the index of a line itself + const bool is_circle = is_constrained(entry->first); + Assert (is_circle == false, + ExcDoFConstrainedToConstrainedDoF(line->index, entry->first)); + } +#endif + + sorted = true; +} + + + +void +AffineConstraints::merge (const AffineConstraints &other_constraints, + const MergeConflictBehavior merge_conflict_behavior, + const bool allow_different_local_lines) +{ + (void) allow_different_local_lines; + Assert(allow_different_local_lines || + local_lines == other_constraints.local_lines, + ExcMessage("local_lines for this and the other objects are not the same " + "although allow_different_local_lines is false.")); + + // store the previous state with respect to sorting + const bool object_was_sorted = sorted; + sorted = false; + + // first action is to fold into the present object possible constraints + // in the second object. we don't strictly need to do this any more since + // the AffineConstraints container has learned to deal with chains of + // constraints in the close() function, but we have traditionally done + // this and it's not overly hard to do. + // + // for this, loop over all constraints and replace the constraint lines + // with a new one where constraints are replaced if necessary. + ConstraintLine::Entries tmp; + for (std::vector::iterator line=lines.begin(); + line!=lines.end(); ++line) + { + tmp.clear (); + for (size_type i=0; ientries.size(); ++i) + { + // if the present dof is not stored, or not constrained, or if we won't take the + // constraint from the other object, then simply copy it over + if ((other_constraints.local_lines.size() != 0 + && other_constraints.local_lines.is_element(line->entries[i].first) == false) + || + other_constraints.is_constrained(line->entries[i].first) == false + || + ((merge_conflict_behavior != right_object_wins) + && other_constraints.is_constrained(line->entries[i].first) + && this->is_constrained(line->entries[i].first))) + tmp.push_back(line->entries[i]); + else + // otherwise resolve further constraints by replacing the old + // entry by a sequence of new entries taken from the other + // object, but with multiplied weights + { + const ConstraintLine::Entries *other_line + = other_constraints.get_constraint_entries (line->entries[i].first); + Assert (other_line != nullptr, + ExcInternalError()); + + const double weight = line->entries[i].second; + + for (ConstraintLine::Entries::const_iterator j=other_line->begin(); + j!=other_line->end(); ++j) + tmp.emplace_back(j->first, j->second*weight); + + line->inhomogeneity + += other_constraints.get_inhomogeneity(line->entries[i].first) * + weight; + } + } + // finally exchange old and newly resolved line + line->entries.swap (tmp); + } + + if (local_lines.size() != 0) + local_lines.add_indices(other_constraints.local_lines); + + { + // do not bother to resize the lines cache exactly since it is pretty + // cheap to adjust it along the way. 
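+    // Invalidate all cache entries first, then rebuild the cache: our own
+    // lines keep their positions, and lines that exist only in the other
+    // object are appended behind them.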
+ std::fill(lines_cache.begin(), lines_cache.end(), numbers::invalid_size_type); + + // reset lines_cache for our own constraints + size_type index = 0; + for (std::vector::const_iterator line = lines.begin(); + line != lines.end(); ++line) + { + size_type local_line_no = calculate_line_index(line->index); + if (local_line_no >= lines_cache.size()) + lines_cache.resize(local_line_no+1, numbers::invalid_size_type); + lines_cache[local_line_no] = index++; + } + + // Add other_constraints to lines cache and our list of constraints + for (std::vector::const_iterator line = other_constraints.lines.begin(); + line != other_constraints.lines.end(); ++line) + { + const size_type local_line_no = calculate_line_index(line->index); + if (local_line_no >= lines_cache.size()) + { + lines_cache.resize(local_line_no+1, numbers::invalid_size_type); + lines.push_back(*line); + lines_cache[local_line_no] = index++; + } + else if (lines_cache[local_line_no] == numbers::invalid_size_type) + { + // there are no constraints for that line yet + lines.push_back(*line); + AssertIndexRange(local_line_no, lines_cache.size()); + lines_cache[local_line_no] = index++; + } + else + { + // we already store that line + switch (merge_conflict_behavior) + { + case no_conflicts_allowed: + AssertThrow (false, + ExcDoFIsConstrainedFromBothObjects (line->index)); + break; + + case left_object_wins: + // ignore this constraint + break; + + case right_object_wins: + AssertIndexRange(local_line_no, lines_cache.size()); + lines[lines_cache[local_line_no]] = *line; + break; + + default: + Assert (false, ExcNotImplemented()); + } + } + } + + // check that we set the pointers correctly + for (size_type i=0; i::iterator i = lines.begin(); + i != lines.end(); ++i) + { + i->index += offset; + for (ConstraintLine::Entries::iterator + j = i->entries.begin(); + j != i->entries.end(); ++j) + j->first += offset; + } + +#ifdef DEBUG + // make sure that lines, lines_cache and local_lines + // are still linked correctly + for (size_type i=0; i tmp; + lines.swap (tmp); + } + + { + std::vector tmp; + lines_cache.swap (tmp); + } + + sorted = false; +} + + + +void AffineConstraints::reinit (const IndexSet &local_constraints) +{ + local_lines = local_constraints; + + // make sure the IndexSet is compressed. Otherwise this can lead to crashes + // that are hard to find (only happen in release mode). + // see tests/mpi/affine_constraints_crash_01 + local_lines.compress(); + + clear(); +} + + + +void AffineConstraints::condense (SparsityPattern &sparsity) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed()); + Assert (sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic()); + + // store for each index whether it must be distributed or not. If entry + // is numbers::invalid_unsigned_int, no distribution is necessary. + // otherwise, the number states which line in the constraint matrix + // handles this index + std::vector distribute(sparsity.n_rows(), + numbers::invalid_size_type); + + for (size_type c=0; cis_valid_entry()); + ++entry) + { + const size_type column = entry->column(); + + if (distribute[column] != numbers::invalid_size_type) + { + // distribute entry at regular row @p{row} and irregular + // column sparsity.colnums[j] + for (size_type q=0; + q!=lines[distribute[column]].entries.size(); + ++q) + sparsity.add (row, + lines[distribute[column]].entries[q].first); + } + } + } + else + // row must be distributed. 
note that here the present row is not + // touched (unlike above) + { + for (SparsityPattern::iterator entry = sparsity.begin(row); + (entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry) + { + const size_type column = entry->column(); + if (distribute[column] == numbers::invalid_size_type) + // distribute entry at irregular row @p{row} and regular + // column sparsity.colnums[j] + for (size_type q=0; + q!=lines[distribute[row]].entries.size(); ++q) + sparsity.add (lines[distribute[row]].entries[q].first, + column); + else + // distribute entry at irregular row @p{row} and irregular + // column sparsity.get_column_numbers()[j] + for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p) + for (size_type q=0; + q!=lines[distribute[column]].entries.size(); ++q) + sparsity.add (lines[distribute[row]].entries[p].first, + lines[distribute[column]].entries[q].first); + } + } + } + + sparsity.compress(); +} + + + + +void AffineConstraints::condense (DynamicSparsityPattern &sparsity) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + Assert (sparsity.n_rows() == sparsity.n_cols(), + ExcNotQuadratic()); + + // store for each index whether it must be distributed or not. If entry + // is numbers::invalid_unsigned_int, no distribution is necessary. + // otherwise, the number states which line in the constraint matrix + // handles this index + std::vector distribute(sparsity.n_rows(), + numbers::invalid_size_type); + + for (size_type c=0; c distribute (sparsity.n_rows(), + numbers::invalid_size_type); + + for (size_type c=0; c + block_index = index_mapping.global_to_local(row); + const size_type block_row = block_index.first; + + if (distribute[row] == numbers::invalid_size_type) + // regular line. loop over all columns and see whether this column + // must be distributed + { + + // to loop over all entries in this row, we have to loop over all + // blocks in this blockrow and the corresponding row therein + for (size_type block_col=0; block_colis_valid_entry(); + ++entry) + { + const size_type global_col + = index_mapping.local_to_global(block_col, entry->column()); + + if (distribute[global_col] != numbers::invalid_size_type) + // distribute entry at regular row @p{row} and + // irregular column global_col + { + for (size_type q=0; + q!=lines[distribute[global_col]].entries.size(); ++q) + sparsity.add (row, + lines[distribute[global_col]].entries[q].first); + } + } + } + } + else + { + // row must be distributed. split the whole row into the chunks + // defined by the blocks + for (size_type block_col=0; block_colis_valid_entry(); + ++entry) + { + const size_type global_col + = index_mapping.local_to_global (block_col, entry->column()); + + if (distribute[global_col] == numbers::invalid_size_type) + // distribute entry at irregular row @p{row} and + // regular column global_col. 
+ { + for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q) + sparsity.add (lines[distribute[row]].entries[q].first, global_col); + } + else + // distribute entry at irregular row @p{row} and + // irregular column @p{global_col} + { + for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p) + for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q) + sparsity.add (lines[distribute[row]].entries[p].first, + lines[distribute[global_col]].entries[q].first); + } + } + } + } + } + + sparsity.compress(); +} + + + + +void AffineConstraints::condense (BlockDynamicSparsityPattern &sparsity) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + Assert (sparsity.n_rows() == sparsity.n_cols(), + ExcNotQuadratic()); + Assert (sparsity.n_block_rows() == sparsity.n_block_cols(), + ExcNotQuadratic()); + Assert (sparsity.get_column_indices() == sparsity.get_row_indices(), + ExcNotQuadratic()); + + const BlockIndices & + index_mapping = sparsity.get_column_indices(); + + const size_type n_blocks = sparsity.n_block_rows(); + + // store for each index whether it must be distributed or not. If entry + // is numbers::invalid_unsigned_int, no distribution is necessary. + // otherwise, the number states which line in the constraint matrix + // handles this index + std::vector distribute (sparsity.n_rows(), + numbers::invalid_size_type); + + for (size_type c=0; c(c); + + const size_type n_rows = sparsity.n_rows(); + for (size_type row=0; row + block_index = index_mapping.global_to_local(row); + const size_type block_row = block_index.first; + const size_type local_row = block_index.second; + + if (distribute[row] == numbers::invalid_size_type) + // regular line. loop over all columns and see whether this column + // must be distributed. note that as we proceed to distribute cols, + // the loop over cols may get longer. + // + // don't try to be clever here as in the algorithm for the + // DynamicSparsityPattern, as that would be much more + // complicated here. after all, we know that compressed patterns + // are inefficient... + { + + // to loop over all entries in this row, we have to loop over all + // blocks in this blockrow and the corresponding row therein + for (size_type block_col=0; block_col::const_iterator i=lines.begin(); + i!=lines.end(); ++i) + // use static cast, since typeof(size)==std::size_t, which is != + // size_type on AIX + return_value = std::max(return_value, + static_cast(i->entries.size())); + + return return_value; +} + + + +bool AffineConstraints::has_inhomogeneities () const +{ + for (std::vector::const_iterator i=lines.begin(); + i!=lines.end(); ++i) + if (i->inhomogeneity != 0.) 
+ return true; + + return false; +} + + +void AffineConstraints::print (std::ostream &out) const +{ + for (size_type i=0; i!=lines.size(); ++i) + { + // output the list of constraints as pairs of dofs and their weights + if (lines[i].entries.size() > 0) + { + for (size_type j=0; j 0) + for (size_type j=0; j" << lines[i].entries[j].first + << "; // weight: " + << lines[i].entries[j].second + << "\n"; + else + out << " " << lines[i].index << "\n"; + } + out << "}" << std::endl; +} + + + +std::size_t +AffineConstraints::memory_consumption () const +{ + return (MemoryConsumption::memory_consumption (lines) + + MemoryConsumption::memory_consumption (lines_cache) + + MemoryConsumption::memory_consumption (sorted) + + MemoryConsumption::memory_consumption (local_lines)); +} + + + +void +AffineConstraints::resolve_indices (std::vector &indices) const +{ + const unsigned int indices_size = indices.size(); + const std::vector > *line_ptr; + for (unsigned int i=0; isize(); + for (unsigned int j=0; j::iterator it; + it = std::unique(indices.begin(),indices.end()); + indices.resize(it-indices.begin()); +} + + + +// explicit instantiations +// +// define a list of functions for vectors and matrices, respectively, where +// the vector/matrix can be replaced using a preprocessor variable +// VectorType/MatrixType. note that we need a space between "VectorType" and +// ">" to disambiguate ">>" when VectorType trails in an angle bracket + +// TODO: The way we define all the instantiations is probably not the very +// best one. Try to find a better description. + +#define VECTOR_FUNCTIONS(VectorType) \ + template void AffineConstraints::condense(const VectorType &uncondensed,\ + VectorType &condensed) const;\ + template void AffineConstraints::condense(VectorType &vec) const;\ + template void AffineConstraints:: \ + distribute_local_to_global (const Vector &, \ + const std::vector &, \ + VectorType &, \ + const FullMatrix &) const;\ + template void AffineConstraints:: \ + distribute_local_to_global (const Vector &, \ + const std::vector &, \ + const std::vector &, \ + VectorType &, \ + const FullMatrix &, \ + bool) const + +#define PARALLEL_VECTOR_FUNCTIONS(VectorType) \ + template void AffineConstraints:: \ + distribute_local_to_global (const Vector &, \ + const std::vector &, \ + VectorType &, \ + const FullMatrix &) const;\ + template void AffineConstraints:: \ + distribute_local_to_global (const Vector &, \ + const std::vector &, \ + const std::vector &, \ + VectorType &, \ + const FullMatrix &, \ + bool) const + +#ifdef DEAL_II_WITH_PETSC +VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector); +VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector); +#endif + +#ifdef DEAL_II_WITH_TRILINOS +PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::Vector); +PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::BlockVector); +#endif + +#define MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \ + template void AffineConstraints:: \ + distribute_local_to_global (const FullMatrix &, \ + const Vector &, \ + const std::vector &, \ + MatrixType &, \ + VectorType &, \ + bool , \ + std::integral_constant) const +#define MATRIX_FUNCTIONS(MatrixType,VectorScalar) \ + template void AffineConstraints:: \ + distribute_local_to_global > (const FullMatrix &, \ + const Vector &, \ + const std::vector &, \ + MatrixType &, \ + Vector &, \ + bool , \ + std::integral_constant) const + +#define BLOCK_MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \ + template void AffineConstraints:: \ + distribute_local_to_global (const FullMatrix &, \ + const Vector 
&, \ + const std::vector &, \ + MatrixType &, \ + VectorType &, \ + bool , \ + std::integral_constant) const +#define BLOCK_MATRIX_FUNCTIONS(MatrixType) \ + template void AffineConstraints:: \ + distribute_local_to_global > (const FullMatrix &, \ + const Vector &, \ + const std::vector &, \ + MatrixType &, \ + Vector &, \ + bool , \ + std::integral_constant) const + +MATRIX_FUNCTIONS(FullMatrix,double); +MATRIX_FUNCTIONS(FullMatrix,float); +MATRIX_FUNCTIONS(FullMatrix,std::complex); +MATRIX_FUNCTIONS(FullMatrix >,std::complex); + +MATRIX_FUNCTIONS(SparseMatrix,double); +MATRIX_FUNCTIONS(SparseMatrix,float); +MATRIX_FUNCTIONS(SparseMatrix,std::complex); +MATRIX_FUNCTIONS(SparseMatrix,std::complex); +MATRIX_FUNCTIONS(SparseMatrix >,std::complex); +MATRIX_FUNCTIONS(SparseMatrix >,std::complex); + +MATRIX_FUNCTIONS(SparseMatrixEZ,double); +MATRIX_FUNCTIONS(SparseMatrixEZ,float); +MATRIX_FUNCTIONS(ChunkSparseMatrix,double); +MATRIX_FUNCTIONS(ChunkSparseMatrix,float); + + +BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix); +BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix); +BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix, BlockVector); +BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix, BlockVector); + +// BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrixEZ); +// BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrixEZ, Vector); + +#ifdef DEAL_II_WITH_PETSC +MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix,PetscScalar); +MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix,PetscScalar); +BLOCK_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix); +MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::SparseMatrix, PETScWrappers::MPI::Vector); +MATRIX_VECTOR_FUNCTIONS(PETScWrappers::SparseMatrix, PETScWrappers::MPI::Vector); +BLOCK_MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix,PETScWrappers::MPI::BlockVector); +#endif + +#ifdef DEAL_II_WITH_TRILINOS +MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix,double); +BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix); +MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector); +BLOCK_MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix, TrilinosWrappers::MPI::BlockVector); +#endif + + +#define SPARSITY_FUNCTIONS(SparsityPatternType) \ + template void AffineConstraints::add_entries_local_to_global ( \ + const std::vector &, \ + SparsityPatternType &, \ + const bool, \ + const Table<2,bool> &, \ + std::integral_constant) const; \ + template void AffineConstraints::add_entries_local_to_global ( \ + const std::vector &, \ + const std::vector &, \ + SparsityPatternType &, \ + const bool, \ + const Table<2,bool> &) const + +#define BLOCK_SPARSITY_FUNCTIONS(SparsityPatternType) \ + template void AffineConstraints::add_entries_local_to_global ( \ + const std::vector &, \ + SparsityPatternType &, \ + const bool, \ + const Table<2,bool> &, \ + std::integral_constant) const; \ + template void AffineConstraints::add_entries_local_to_global ( \ + const std::vector &, \ + const std::vector &, \ + SparsityPatternType &, \ + const bool, \ + const Table<2,bool> &) const + +SPARSITY_FUNCTIONS(SparsityPattern); +SPARSITY_FUNCTIONS(DynamicSparsityPattern); +BLOCK_SPARSITY_FUNCTIONS(BlockSparsityPattern); +BLOCK_SPARSITY_FUNCTIONS(BlockDynamicSparsityPattern); + +#ifdef DEAL_II_WITH_TRILINOS +SPARSITY_FUNCTIONS(TrilinosWrappers::SparsityPattern); +BLOCK_SPARSITY_FUNCTIONS(TrilinosWrappers::BlockSparsityPattern); +#endif + + +#define ONLY_MATRIX_FUNCTIONS(MatrixType) \ + template void AffineConstraints::distribute_local_to_global ( \ + const FullMatrix &, \ + 
const std::vector &, \ + const std::vector &, \ + MatrixType &) const; \ + template void AffineConstraints::distribute_local_to_global ( \ + const FullMatrix &, \ + const std::vector &, \ + const AffineConstraints &, \ + const std::vector &, \ + MatrixType &) const + +ONLY_MATRIX_FUNCTIONS(FullMatrix); +ONLY_MATRIX_FUNCTIONS(FullMatrix); +ONLY_MATRIX_FUNCTIONS(SparseMatrix); +ONLY_MATRIX_FUNCTIONS(SparseMatrix); +ONLY_MATRIX_FUNCTIONS(MatrixBlock >); +ONLY_MATRIX_FUNCTIONS(MatrixBlock >); +ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix); +ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix); + +#ifdef DEAL_II_WITH_TRILINOS +ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix); +ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix); +#endif + +#ifdef DEAL_II_WITH_PETSC +ONLY_MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix); +ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix); +ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix); +#endif + +#include "affine_constraints.inst" + +// allocate scratch data. Cannot use the generic template instantiation +// because we need to provide an initializer object of type +// internals::AffineConstraintsData that can be passed to the +// constructor of scratch_data (it won't allow one to be constructed in place). +namespace internals +{ +#define SCRATCH_INITIALIZER(MatrixScalar,VectorScalar,Name) \ + AffineConstraintsData::ScratchData scratch_data_initializer_##Name; \ + template <> Threads::ThreadLocalStorage::ScratchData> \ + AffineConstraintsData::scratch_data(scratch_data_initializer_##Name) + + SCRATCH_INITIALIZER(double,double,dd); + SCRATCH_INITIALIZER(float,float,ff); + SCRATCH_INITIALIZER(std::complex,std::complex,zz); + SCRATCH_INITIALIZER(std::complex,std::complex,cc); + SCRATCH_INITIALIZER(double,std::complex,dz); + SCRATCH_INITIALIZER(float,std::complex,fc); +#undef SCRATCH_INITIALIZER +} + + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/lac/affine_constraints.inst.in b/source/lac/affine_constraints.inst.in new file mode 100644 index 0000000000..a214045886 --- /dev/null +++ b/source/lac/affine_constraints.inst.in @@ -0,0 +1,114 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2013 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+//
+// ---------------------------------------------------------------------
+
+for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
+{
+  template void AffineConstraints::condense<T<S> >(const T<S> &, T<S> &) const;
+  template void AffineConstraints::condense<T<S> >(T<S> &vec) const;
+  template void AffineConstraints::distribute_local_to_global<T<S> > (
+    const Vector<S>&, const std::vector<size_type> &, T<S> &, const FullMatrix<S>&) const;
+  template void AffineConstraints::distribute_local_to_global<T<S> > (
+    const Vector<S>&, const std::vector<size_type> &, const std::vector<size_type> &, T<S> &, const FullMatrix<S>&, bool) const;
+  template void AffineConstraints::set_zero<T<S> >(T<S> &) const;
+}
+
+
+for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
+{
+  template void AffineConstraints::condense<LinearAlgebra::distributed::T<S> >(const LinearAlgebra::distributed::T<S> &, LinearAlgebra::distributed::T<S> &) const;
+  template void AffineConstraints::condense<LinearAlgebra::distributed::T<S> >(LinearAlgebra::distributed::T<S> &vec) const;
+
+  template
+  void
+  AffineConstraints::distribute_local_to_global<LinearAlgebra::distributed::T<S> >
+  (const Vector<S>&,
+   const std::vector<size_type> &,
+   LinearAlgebra::distributed::T<S> &,
+   const FullMatrix<S>&) const;
+
+  template
+  void
+  AffineConstraints::distribute_local_to_global<LinearAlgebra::distributed::T<S> >
+  (const Vector<S>&,
+   const std::vector<size_type> &,
+   const std::vector<size_type> &,
+   LinearAlgebra::distributed::T<S> &,
+   const FullMatrix<S>&,
+   bool) const;
+
+  template
+  void
+  AffineConstraints::distribute_local_to_global<DiagonalMatrix<LinearAlgebra::distributed::T<S> > >
+  (const FullMatrix<S> &,
+   const std::vector< size_type > &,
+   DiagonalMatrix<LinearAlgebra::distributed::T<S> > &) const;
+
+  template
+  void
+  AffineConstraints::distribute_local_to_global<DiagonalMatrix<LinearAlgebra::distributed::T<S> >, LinearAlgebra::distributed::T<S> >
+  (const FullMatrix<S> &,
+   const Vector<S>&,
+   const std::vector< size_type > &,
+   DiagonalMatrix<LinearAlgebra::distributed::T<S> > &,
+   LinearAlgebra::distributed::T<S>&,
+   bool,
+   std::integral_constant<bool, false>) const;
+
+  template
+  void
+  AffineConstraints::distribute_local_to_global<DiagonalMatrix<LinearAlgebra::distributed::T<S> >, T<S> >
+  (const FullMatrix<S> &,
+   const Vector<S>&,
+   const std::vector< size_type > &,
+   DiagonalMatrix<LinearAlgebra::distributed::T<S> > &,
+   T<S>&,
+   bool,
+   std::integral_constant<bool, false>) const;
+
+  template
+  void
+  AffineConstraints::set_zero<LinearAlgebra::distributed::T<S> >(LinearAlgebra::distributed::T<S> &) const;
+}
+
+
+for (V: EXTERNAL_PARALLEL_VECTORS)
+{
+  template void AffineConstraints::set_zero<V>(V&) const;
+}
+
+
+for (S : REAL_SCALARS)
+{
+  template void AffineConstraints::condense<S>(SparseMatrix<S>&) const;
+  template void AffineConstraints::condense<S>(BlockSparseMatrix<S>&) const;
+}
+
+
+for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
+{
+  template void AffineConstraints::condense<S1,Vector<S2> >(SparseMatrix<S1>&, Vector<S2>&) const;
+  template void AffineConstraints::condense<S1,BlockVector<S2> >(BlockSparseMatrix<S1>&, BlockVector<S2>&) const;
+}
+
+for (S1 : COMPLEX_SCALARS)
+{
+  template void AffineConstraints::condense<S1,Vector<S1> >(SparseMatrix<S1>&, Vector<S1>&) const;
+}
+
+
+for (Vec : VECTOR_TYPES)
+{
+  template void AffineConstraints::distribute<Vec>(Vec &) const;
+}
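A minimal usage sketch (not part of this patch; the indices, sizes, and the
function name example_assembly are purely illustrative) may help map the
pieces above together: it constrains x_3 = 0.5*x_1 + 0.5*x_2 + 1, assembles
a local 4x4 contribution into a SparseMatrix<double> while eliminating the
constraint on the fly, and finally writes the correct value into the
constrained solution entry. Only interface functions instantiated above are
used.

  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/full_matrix.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/sparsity_pattern.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  void example_assembly()
  {
    AffineConstraints constraints;

    // x_3 = 0.5 * x_1 + 0.5 * x_2 + 1.0
    constraints.add_line(3);
    constraints.add_entry(3, 1, 0.5);
    constraints.add_entry(3, 2, 0.5);
    constraints.set_inhomogeneity(3, 1.0);
    constraints.close(); // sort entries, resolve chains, compress storage

    const unsigned int n_dofs = 4;
    const std::vector<types::global_dof_index> local_dofs = {0, 1, 2, 3};

    // Let the constraints object create exactly the sparsity entries that
    // eliminating the constraint will later need.
    DynamicSparsityPattern dsp(n_dofs);
    constraints.add_entries_local_to_global(local_dofs, dsp,
                                            /*keep_constrained_entries=*/true);
    SparsityPattern sparsity;
    sparsity.copy_from(dsp);

    SparseMatrix<double> matrix(sparsity);
    Vector<double>       rhs(n_dofs);

    FullMatrix<double> cell_matrix(4, 4); // would be filled by a quadrature loop
    Vector<double>     cell_rhs(4);

    // Distribute the local contributions, resolving the constraint on row
    // and column 3 and moving the inhomogeneity to the right hand side.
    constraints.distribute_local_to_global(cell_matrix, cell_rhs,
                                           local_dofs, matrix, rhs);

    // ... solve the linear system into 'solution' ...
    Vector<double> solution(n_dofs);

    // Finally assign the correct value to the constrained entry:
    // solution(3) = 0.5*solution(1) + 0.5*solution(2) + 1.0.
    constraints.distribute(solution);
  }

Note that set_matrix_diagonals() above is what guarantees that row 3 of the
assembled matrix still carries a (roughly average-magnitude) diagonal entry,
so the resulting linear system remains solvable.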