From 109d83dc2b4914b8c7b8de72211e19ca67adc290 Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Tue, 3 Dec 2013 13:36:22 +0000 Subject: [PATCH] Add scratch data field in constraint matrix to avoid frequent re-allocation that can affect parallel performance (memory allocation is not scalable). It is made thread-local and we need to make sure that no function used inside distribute_local_to_global spawns tasks by itself. There is a bool state variable to provide some security against race conditions, even though it is subject to race conditions, too. git-svn-id: https://svn.dealii.org/trunk@31851 0785d39b-7218-0410-832d-ea1e28bc413d --- .../include/deal.II/lac/constraint_matrix.h | 1699 +++++++---------- .../deal.II/lac/constraint_matrix.templates.h | 192 +- .../deal.II/numerics/vector_tools.templates.h | 218 +-- .../deal.II/assemble_block_matrix_parallel.cc | 469 +++++ .../assemble_block_matrix_parallel.output | 7 + tests/deal.II/assemble_matrix_parallel.cc | 462 +++++ tests/deal.II/assemble_matrix_parallel.output | 7 + 7 files changed, 1794 insertions(+), 1260 deletions(-) create mode 100644 tests/deal.II/assemble_block_matrix_parallel.cc create mode 100644 tests/deal.II/assemble_block_matrix_parallel.output create mode 100644 tests/deal.II/assemble_matrix_parallel.cc create mode 100644 tests/deal.II/assemble_matrix_parallel.output diff --git a/deal.II/include/deal.II/lac/constraint_matrix.h b/deal.II/include/deal.II/lac/constraint_matrix.h index 7020842c8a..e6cecd0ceb 100644 --- a/deal.II/include/deal.II/lac/constraint_matrix.h +++ b/deal.II/include/deal.II/lac/constraint_matrix.h @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -150,58 +151,40 @@ public: typedef types::global_dof_index size_type; /** - * An enum that describes what should - * happen if the two ConstraintMatrix - * objects involved in a call to the - * merge() function happen to have - * constraints on the same degrees of - * freedom. + * An enum that describes what should happen if the two ConstraintMatrix + * objects involved in a call to the merge() function happen to have + * constraints on the same degrees of freedom. */ enum MergeConflictBehavior { /** - * Throw an exception if the two - * objects concerned have - * conflicting constraints on the - * same degree of freedom. + * Throw an exception if the two objects concerned have conflicting + * constraints on the same degree of freedom. */ no_conflicts_allowed, /** - * In an operation - * cm1.merge(cm2), if - * cm1 and - * cm2 have - * constraints on the same degree - * of freedom, take the one from - * cm1. + * In an operation cm1.merge(cm2), if cm1 and + * cm2 have constraints on the same degree of freedom, take + * the one from cm1. */ left_object_wins, /** - * In an operation - * cm1.merge(cm2), if - * cm1 and - * cm2 have - * constraints on the same degree - * of freedom, take the one from - * cm2. + * In an operation cm1.merge(cm2), if cm1 and + * cm2 have constraints on the same degree of freedom, take + * the one from cm2. */ right_object_wins }; /** - * Constructor. The supplied IndexSet - * defines which indices might be - * constrained inside this - * ConstraintMatrix. In a calculation - * with a - * parallel::distributed::DoFHandler one - * should use locally_relevant_dofs. The - * IndexSet allows the ConstraintMatrix - * to safe memory. Otherwise internal - * data structures for all possible - * indices will be created. + * Constructor. 
The supplied IndexSet defines which indices might be + * constrained inside this ConstraintMatrix. In a calculation with a + * parallel::distributed::DoFHandler one should use + * locally_relevant_dofs. The IndexSet allows the ConstraintMatrix to safe + * memory. Otherwise internal data structures for all possible indices will + * be created. */ ConstraintMatrix (const IndexSet &local_constraints = IndexSet()); @@ -211,25 +194,18 @@ public: ConstraintMatrix (const ConstraintMatrix &constraint_matrix); /** - * Reinit the ConstraintMatrix object and - * supply an IndexSet with lines that may - * be constrained. This function is only - * relevant in the distributed case to - * supply a different IndexSet. Otherwise - * this routine is equivalent to calling - * clear(). See the constructor for - * details. + * Reinit the ConstraintMatrix object and supply an IndexSet with lines that + * may be constrained. This function is only relevant in the distributed + * case to supply a different IndexSet. Otherwise this routine is equivalent + * to calling clear(). See the constructor for details. */ void reinit (const IndexSet &local_constraints = IndexSet()); /** - * Determines if we can store a - * constraint for the given @p - * line_index. This routine only matters - * in the distributed case and checks if - * the IndexSet allows storage of this - * line. Always returns true if not in - * the distributed case. + * Determines if we can store a constraint for the given @p line_index. This + * routine only matters in the distributed case and checks if the IndexSet + * allows storage of this line. Always returns true if not in the + * distributed case. */ bool can_store_line (const size_type line_index) const; @@ -242,39 +218,23 @@ public: const IndexSet & get_local_lines() const; /** - * This function copies the content of @p - * constraints_in with DoFs that are - * element of the IndexSet @p - * filter. Elements that are not present - * in the IndexSet are ignored. All DoFs - * will be transformed to local index - * space of the filter, both the - * constrained DoFs and the other DoFs - * these entries are constrained to. The - * local index space of the filter is a - * contiguous numbering of all (global) - * DoFs that are elements in the - * filter. - * - * If, for example, the filter represents - * the range [10,20), and the - * constraint matrix @p constraints_in - * includes the global indices - * {7,13,14}, the indices - * {3,4} are added to the - * calling constraint matrix (since 13 - * and 14 are elements in the filter and - * element 13 is the fourth element in - * the index, and 14 is the fifth). - * - * This function provides an easy way to - * create a ConstraintMatrix for certain - * vector components in a vector-valued - * problem from a full ConstraintMatrix, - * i.e. extracting a diagonal subblock - * from a larger ConstraintMatrix. The - * block is specified by the IndexSet - * argument. + * This function copies the content of @p constraints_in with DoFs that are + * element of the IndexSet @p filter. Elements that are not present in the + * IndexSet are ignored. All DoFs will be transformed to local index space + * of the filter, both the constrained DoFs and the other DoFs these entries + * are constrained to. The local index space of the filter is a contiguous + * numbering of all (global) DoFs that are elements in the filter. 
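+   *
+   * Purely as an illustrative sketch (the objects @p all_constraints and
+   * @p velocity_dofs are hypothetical and assumed to have been set up
+   * elsewhere), extracting the constraints that act on one set of DoFs
+   * could look like this:
+   * @code
+   *   ConstraintMatrix velocity_constraints;
+   *   velocity_constraints.add_selected_constraints (all_constraints,
+   *                                                   velocity_dofs);
+   * @endcode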
+ * + * If, for example, the filter represents the range [10,20), and + * the constraint matrix @p constraints_in includes the global indices + * {7,13,14}, the indices {3,4} are added to the calling + * constraint matrix (since 13 and 14 are elements in the filter and element + * 13 is the fourth element in the index, and 14 is the fifth). + * + * This function provides an easy way to create a ConstraintMatrix for + * certain vector components in a vector-valued problem from a full + * ConstraintMatrix, i.e. extracting a diagonal subblock from a larger + * ConstraintMatrix. The block is specified by the IndexSet argument. */ void add_selected_constraints (const ConstraintMatrix &constraints_in, const IndexSet &filter); @@ -285,220 +245,147 @@ public: */ /** - * Add a new line to the matrix. If the - * line already exists, then the function - * simply returns without doing anything. + * Add a new line to the matrix. If the line already exists, then the + * function simply returns without doing anything. */ void add_line (const size_type line); /** - * Call the first add_line() function for - * every index i for which - * lines[i] is true. - * - * This function essentially exists to - * allow adding several constraints of - * the form xi=0 all at once, where - * the set of indices i for which these - * constraints should be added are given - * by the argument of this function. On - * the other hand, just as if the - * single-argument add_line() function - * were called repeatedly, the - * constraints can later be modified to - * include linear dependencies using the - * add_entry() function as well as - * inhomogeneities using + * Call the first add_line() function for every index i for + * which lines[i] is true. + * + * This function essentially exists to allow adding several constraints of + * the form xi=0 all at once, where the set of indices + * i for which these constraints should be added are given by the + * argument of this function. On the other hand, just as if the + * single-argument add_line() function were called repeatedly, the + * constraints can later be modified to include linear dependencies using + * the add_entry() function as well as inhomogeneities using * set_inhomogeneity(). */ void add_lines (const std::vector &lines); /** - * Call the first add_line() function for - * every index i that + * Call the first add_line() function for every index i that * appears in the argument. * - * This function essentially exists to - * allow adding several constraints of - * the form xi=0 all at once, where - * the set of indices i for which these - * constraints should be added are given - * by the argument of this function. On - * the other hand, just as if the - * single-argument add_line() function - * were called repeatedly, the - * constraints can later be modified to - * include linear dependencies using the - * add_entry() function as well as - * inhomogeneities using + * This function essentially exists to allow adding several constraints of + * the form xi=0 all at once, where the set of indices + * i for which these constraints should be added are given by the + * argument of this function. On the other hand, just as if the + * single-argument add_line() function were called repeatedly, the + * constraints can later be modified to include linear dependencies using + * the add_entry() function as well as inhomogeneities using * set_inhomogeneity(). 
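+   *
+   * For instance, the following sketch (the indices are chosen arbitrarily
+   * for illustration) first adds the homogeneous constraints $x_3=0$ and
+   * $x_7=0$ and then turns the first of them into the inhomogeneous
+   * constraint $x_3 = \frac{x_7}{2} + 1$:
+   * @code
+   *   ConstraintMatrix constraints;
+   *   std::set<types::global_dof_index> lines;
+   *   lines.insert (3);
+   *   lines.insert (7);
+   *   constraints.add_lines (lines);
+   *   constraints.add_entry (3, 7, 0.5);
+   *   constraints.set_inhomogeneity (3, 1.0);
+   *   constraints.close ();
+   * @endcode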
*/ void add_lines (const std::set &lines); /** - * Call the first add_line() function for - * every index i that + * Call the first add_line() function for every index i that * appears in the argument. * - * This function essentially exists to - * allow adding several constraints of - * the form xi=0 all at once, where - * the set of indices i for which these - * constraints should be added are given - * by the argument of this function. On - * the other hand, just as if the - * single-argument add_line() function - * were called repeatedly, the - * constraints can later be modified to - * include linear dependencies using the - * add_entry() function as well as - * inhomogeneities using + * This function essentially exists to allow adding several constraints of + * the form xi=0 all at once, where the set of indices + * i for which these constraints should be added are given by the + * argument of this function. On the other hand, just as if the + * single-argument add_line() function were called repeatedly, the + * constraints can later be modified to include linear dependencies using + * the add_entry() function as well as inhomogeneities using * set_inhomogeneity(). */ void add_lines (const IndexSet &lines); /** - * Add an entry to a given - * line. The list of lines is - * searched from the back to the - * front, so clever programming - * would add a new line (which is - * pushed to the back) and - * immediately afterwards fill - * the entries of that line. This - * way, no expensive searching is - * needed. - * - * If an entry with the same - * indices as the one this - * function call denotes already - * exists, then this function - * simply returns provided that - * the value of the entry is the - * same. Thus, it does no harm to - * enter a constraint twice. + * Add an entry to a given line. The list of lines is searched from the back + * to the front, so clever programming would add a new line (which is pushed + * to the back) and immediately afterwards fill the entries of that + * line. This way, no expensive searching is needed. + * + * If an entry with the same indices as the one this function call denotes + * already exists, then this function simply returns provided that the value + * of the entry is the same. Thus, it does no harm to enter a constraint + * twice. */ void add_entry (const size_type line, const size_type column, const double value); /** - * Add a whole series of entries, - * denoted by pairs of column indices - * and values, to a line of - * constraints. This function is - * equivalent to calling the preceding - * function several times, but is - * faster. + * Add a whole series of entries, denoted by pairs of column indices and + * values, to a line of constraints. This function is equivalent to calling + * the preceding function several times, but is faster. */ void add_entries (const size_type line, const std::vector > &col_val_pairs); /** - * Set an imhomogeneity to the - * constraint line i, according - * to the discussion in the general - * class description. + * Set an imhomogeneity to the constraint line i, according to the + * discussion in the general class description. * - * @note the line needs to be added with - * one of the add_line() calls first. + * @note the line needs to be added with one of the add_line() calls first. */ void set_inhomogeneity (const size_type line, const double value); /** - * Close the filling of entries. 
Since - * the lines of a matrix of this type - * are usually filled in an arbitrary - * order and since we do not want to - * use associative constainers to store - * the lines, we need to sort the lines - * and within the lines the columns - * before usage of the matrix. This is - * done through this function. - * - * Also, zero entries are discarded, - * since they are not needed. - * - * After closing, no more entries are - * accepted. If the object was already - * closed, then this function returns - * immediately. - * - * This function also resolves chains - * of constraints. For example, degree - * of freedom 13 may be constrained to - * u13=u3/2+u7/2 while degree of - * freedom 7 is itself constrained as - * u7=u2/2+u4/2. Then, the - * resolution will be that - * u13=u3/2+u2/4+u4/4. Note, - * however, that cycles in this graph - * of constraints are not allowed, - * i.e. for example u4 may not be - * constrained, directly or indirectly, - * to u13 again. + * Close the filling of entries. Since the lines of a matrix of this type + * are usually filled in an arbitrary order and since we do not want to use + * associative constainers to store the lines, we need to sort the lines and + * within the lines the columns before usage of the matrix. This is done + * through this function. + * + * Also, zero entries are discarded, since they are not needed. + * + * After closing, no more entries are accepted. If the object was already + * closed, then this function returns immediately. + * + * This function also resolves chains of constraints. For example, degree of + * freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ + * while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2} + * + \frac{u_4}{2}$. Then, the resolution will be that $u_{13} = + * \frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that + * cycles in this graph of constraints are not allowed, i.e. for example + * $u_4$ may not be constrained, directly or indirectly, to $u_{13}$ again. */ void close (); /** - * Merge the constraints represented by - * the object given as argument into - * the constraints represented by this - * object. Both objects may or may not - * be closed (by having their function - * close() called before). If this - * object was closed before, then it - * will be closed afterwards as - * well. Note, however, that if the - * other argument is closed, then - * merging may be significantly faster. - * - * Using the default value of the second - * arguments, the constraints in each of - * the two objects (the old one - * represented by this object and the - * argument) may not refer to the same - * degree of freedom, i.e. a degree of - * freedom that is constrained in one - * object may not be constrained in the - * second. If this is nevertheless the - * case, an exception is thrown. However, - * this behavior can be changed by - * providing a different value for the - * second argument. + * Merge the constraints represented by the object given as argument into + * the constraints represented by this object. Both objects may or may not + * be closed (by having their function close() called before). If this + * object was closed before, then it will be closed afterwards as + * well. Note, however, that if the other argument is closed, then merging + * may be significantly faster. 
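+   *
+   * As a small sketch (assuming two objects @p hanging_node_constraints and
+   * @p boundary_constraints that have been filled elsewhere), a call could
+   * look as follows, with the second argument resolving conflicts as
+   * described below:
+   * @code
+   *   hanging_node_constraints.merge (boundary_constraints,
+   *                                   ConstraintMatrix::right_object_wins);
+   * @endcode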
+ * + * Using the default value of the second arguments, the constraints in each + * of the two objects (the old one represented by this object and the + * argument) may not refer to the same degree of freedom, i.e. a degree of + * freedom that is constrained in one object may not be constrained in the + * second. If this is nevertheless the case, an exception is + * thrown. However, this behavior can be changed by providing a different + * value for the second argument. */ void merge (const ConstraintMatrix &other_constraints, const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed); /** - * Shift all entries of this matrix - * down @p offset rows and over @p - * offset columns. + * Shift all entries of this matrix down @p offset rows and over @p offset + * columns. * - * This function is useful if you are - * building block matrices, where all - * blocks are built by the same - * DoFHandler object, i.e. the matrix - * size is larger than the number of - * degrees of freedom. Since several - * matrix rows and columns correspond - * to the same degrees of freedom, - * you'd generate several constraint - * objects, then shift them, and - * finally merge() them together - * again. + * This function is useful if you are building block matrices, where all + * blocks are built by the same DoFHandler object, i.e. the matrix size is + * larger than the number of degrees of freedom. Since several matrix rows + * and columns correspond to the same degrees of freedom, you'd generate + * several constraint objects, then shift them, and finally merge() them + * together again. */ void shift (const size_type offset); /** - * Clear all entries of this - * matrix. Reset the flag determining - * whether new entries are accepted or - * not. + * Clear all entries of this matrix. Reset the flag determining whether new + * entries are accepted or not. * - * This function may be called also on - * objects which are empty or already + * This function may be called also on objects which are empty or already * cleared. */ void clear (); @@ -514,51 +401,37 @@ public: */ /** - * Return number of constraints stored in - * this matrix. + * Return number of constraints stored in this matrix. */ size_type n_constraints () const; /** - * Return whether the degree of freedom - * with number @p index is a + * Return whether the degree of freedom with number @p index is a * constrained one. * - * Note that if close() was called - * before, then this function is - * significantly faster, since then the - * constrained degrees of freedom are - * sorted and we can do a binary - * search, while before close() was - * called, we have to perform a linear - * search through all entries. + * Note that if close() was called before, then this function is + * significantly faster, since then the constrained degrees of freedom are + * sorted and we can do a binary search, while before close() was called, we + * have to perform a linear search through all entries. */ bool is_constrained (const size_type index) const; /** - * Return whether the dof is - * constrained, and whether it is - * constrained to only one other degree - * of freedom with weight one. The - * function therefore returns whether - * the degree of freedom would simply - * be eliminated in favor of exactly - * one other degree of freedom. 
- * - * The function returns @p false if - * either the degree of freedom is not - * constrained at all, or if it is - * constrained to more than one other - * degree of freedom, or if it is - * constrained to only one degree of - * freedom but with a weight different - * from one. + * Return whether the dof is constrained, and whether it is constrained to + * only one other degree of freedom with weight one. The function therefore + * returns whether the degree of freedom would simply be eliminated in favor + * of exactly one other degree of freedom. + * + * The function returns @p false if either the degree of freedom is not + * constrained at all, or if it is constrained to more than one other degree + * of freedom, or if it is constrained to only one degree of freedom but + * with a weight different from one. */ bool is_identity_constrained (const size_type index) const; /** - * Return whether the two given degrees of freedom are linked by an - * equality constraint that either constrains index1 to be so that + * Return whether the two given degrees of freedom are linked by an equality + * constraint that either constrains index1 to be so that * index1=index2 or constrains index2 so that * index2=index1. */ @@ -566,105 +439,78 @@ public: const size_type index2) const; /** - * Return the maximum number of other - * dofs that one dof is constrained - * to. For example, in 2d a hanging - * node is constrained only to its two - * neighbors, so the returned value - * would be 2. However, for higher - * order elements and/or higher - * dimensions, or other types of - * constraints, this number is no more - * obvious. + * Return the maximum number of other dofs that one dof is constrained + * to. For example, in 2d a hanging node is constrained only to its two + * neighbors, so the returned value would be 2. However, for higher order + * elements and/or higher dimensions, or other types of constraints, this + * number is no more obvious. * - * The name indicates that within the - * system matrix, references to a - * constrained node are indirected to - * the nodes it is constrained to. + * The name indicates that within the system matrix, references to a + * constrained node are indirected to the nodes it is constrained to. */ size_type max_constraint_indirections () const; /** - * Returns true in case the - * dof is constrained and there is a - * non-trivial inhomogeneous valeus set - * to the dof. + * Returns true in case the dof is constrained and there is a + * non-trivial inhomogeneous valeus set to the dof. */ bool is_inhomogeneously_constrained (const size_type index) const; /** - * Returns false if all - * constraints in the ConstraintMatrix - * are homogeneous ones, and - * true if there is at least - * one inhomogeneity. + * Returns false if all constraints in the ConstraintMatrix are + * homogeneous ones, and true if there is at least one + * inhomogeneity. */ bool has_inhomogeneities () const; /** - * Returns a pointer to the the vector of - * entries if a line is constrained, and a - * zero pointer in case the dof is not - * constrained. + * Returns a pointer to the the vector of entries if a line is constrained, + * and a zero pointer in case the dof is not constrained. */ const std::vector > * get_constraint_entries (const size_type line) const; /** - * Returns the value of the inhomogeneity - * stored in the constrained dof @p - * line. Unconstrained dofs also return a - * zero value. + * Returns the value of the inhomogeneity stored in the constrained dof @p + * line. 
Unconstrained dofs also return a zero value. */ double get_inhomogeneity (const size_type line) const; /** - * Print the constraint lines. Mainly - * for debugging purposes. + * Print the constraint lines. Mainly for debugging purposes. * - * This function writes out all entries - * in the constraint matrix lines with - * their value in the form row col - * : value. Unconstrained lines - * containing only one identity entry - * are not stored in this object and - * are not printed. + * This function writes out all entries in the constraint matrix lines with + * their value in the form row col : value. Unconstrained lines + * containing only one identity entry are not stored in this object and are + * not printed. */ void print (std::ostream &) const; /** - * Write the graph of constraints in - * 'dot' format. 'dot' is a program - * that can take a list of nodes and - * produce a graphical representation - * of the graph of constrained degrees - * of freedom and the degrees of - * freedom they are constrained to. + * Write the graph of constraints in 'dot' format. 'dot' is a program that + * can take a list of nodes and produce a graphical representation of the + * graph of constrained degrees of freedom and the degrees of freedom they + * are constrained to. * - * The output of this function can be - * used as input to the 'dot' program - * that can convert the graph into a - * graphical representation in - * postscript, png, xfig, and a number - * of other formats. + * The output of this function can be used as input to the 'dot' program + * that can convert the graph into a graphical representation in postscript, + * png, xfig, and a number of other formats. * - * This function exists mostly for - * debugging purposes. + * This function exists mostly for debugging purposes. */ void write_dot (std::ostream &) const; /** - * Determine an estimate for the memory - * consumption (in bytes) of this + * Determine an estimate for the memory consumption (in bytes) of this * object. */ std::size_t memory_consumption () const; /** * Add the constraint indices associated to the indices in the given vector. - * After a call to this function, the indices vector contains the - * initial elements and all the associated constrained indices. This - * function sorts the elements and suppresses duplicates. + * After a call to this function, the indices vector contains the initial + * elements and all the associated constrained indices. This function sorts + * the elements and suppresses duplicates. */ void resolve_indices(std::vector &indices) const; @@ -678,150 +524,100 @@ public: */ /** - * Condense a given sparsity - * pattern. This function assumes the - * uncondensed matrix struct to be - * compressed and the one to be filled - * to be empty. The condensed structure - * is compressed afterwards. + * Condense a given sparsity pattern. This function assumes the uncondensed + * matrix struct to be compressed and the one to be filled to be empty. The + * condensed structure is compressed afterwards. * - * The constraint matrix object must be - * closed to call this function. + * The constraint matrix object must be closed to call this function. * - * @note The hanging nodes are - * completely eliminated from the - * linear system referring to - * condensed. Therefore, the - * dimension of condensed is - * the dimension of - * uncondensed minus the - * number of constrained degrees of - * freedom. + * @note The hanging nodes are completely eliminated from the linear system + * referring to condensed. 
Therefore, the dimension of + * condensed is the dimension of uncondensed minus the + * number of constrained degrees of freedom. */ void condense (const SparsityPattern &uncondensed, SparsityPattern &condensed) const; /** - * This function does much the same as - * the above one, except that it - * condenses the matrix struct - * 'in-place'. It does not remove - * nonzero entries from the matrix but - * adds those needed for the process of - * distribution of the constrained - * degrees of freedom. + * This function does much the same as the above one, except that it + * condenses the matrix struct 'in-place'. It does not remove nonzero + * entries from the matrix but adds those needed for the process of + * distribution of the constrained degrees of freedom. * - * Since this function adds new nonzero - * entries to the sparsity pattern, the - * argument must not be - * compressed. However the constraint - * matrix must be closed. The matrix - * struct is compressed at the end of - * the function. + * Since this function adds new nonzero entries to the sparsity pattern, the + * argument must not be compressed. However the constraint matrix must be + * closed. The matrix struct is compressed at the end of the function. */ void condense (SparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses square block sparsity - * patterns. + * Same function as above, but condenses square block sparsity patterns. */ void condense (BlockSparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses square compressed sparsity + * Same function as above, but condenses square compressed sparsity * patterns. * - * Given the data structure used by - * CompressedSparsityPattern, this - * function becomes quadratic in the - * number of degrees of freedom for - * large problems and can dominate - * setting up linear systems when - * several hundred thousand or millions - * of unknowns are involved and for - * problems with many nonzero elements - * per row (for example for - * vector-valued problems or hp finite - * elements). In this case, it is - * advisable to use the - * CompressedSetSparsityPattern class - * instead, see for example @ref - * step_27 "step-27", or to use the - * CompressedSimpleSparsityPattern - * class, see for example @ref step_31 - * "step-31". + * Given the data structure used by CompressedSparsityPattern, this function + * becomes quadratic in the number of degrees of freedom for large problems + * and can dominate setting up linear systems when several hundred thousand + * or millions of unknowns are involved and for problems with many nonzero + * elements per row (for example for vector-valued problems or hp finite + * elements). In this case, it is advisable to use the + * CompressedSetSparsityPattern class instead, see for example @ref step_27 + * "step-27", or to use the CompressedSimpleSparsityPattern class, see for + * example @ref step_31 "step-31". */ void condense (CompressedSparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses compressed sparsity - * patterns, which are based on the - * std::set container. + * Same function as above, but condenses compressed sparsity patterns, which + * are based on the std::set container. */ void condense (CompressedSetSparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses compressed sparsity - * patterns, which are based on the - * ''simple'' aproach. 
+ * Same function as above, but condenses compressed sparsity patterns, which + * are based on the ''simple'' aproach. */ void condense (CompressedSimpleSparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses square compressed sparsity + * Same function as above, but condenses square compressed sparsity * patterns. * - * Given the data structure used by - * BlockCompressedSparsityPattern, this - * function becomes quadratic in the - * number of degrees of freedom for - * large problems and can dominate - * setting up linear systems when - * several hundred thousand or millions - * of unknowns are involved and for - * problems with many nonzero elements - * per row (for example for - * vector-valued problems or hp finite - * elements). In this case, it is - * advisable to use the - * BlockCompressedSetSparsityPattern - * class instead, see for example @ref - * step_27 "step-27" and @ref step_31 - * "step-31". + * Given the data structure used by BlockCompressedSparsityPattern, this + * function becomes quadratic in the number of degrees of freedom for large + * problems and can dominate setting up linear systems when several hundred + * thousand or millions of unknowns are involved and for problems with many + * nonzero elements per row (for example for vector-valued problems or hp + * finite elements). In this case, it is advisable to use the + * BlockCompressedSetSparsityPattern class instead, see for example @ref + * step_27 "step-27" and @ref step_31 "step-31". */ void condense (BlockCompressedSparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses square compressed sparsity + * Same function as above, but condenses square compressed sparsity * patterns. */ void condense (BlockCompressedSetSparsityPattern &sparsity) const; /** - * Same function as above, but - * condenses square compressed sparsity + * Same function as above, but condenses square compressed sparsity * patterns. */ void condense (BlockCompressedSimpleSparsityPattern &sparsity) const; /** - * Condense a given matrix. The - * associated matrix struct should be - * condensed and compressed. It is the - * user's responsibility to guarantee - * that all entries in the @p condensed - * matrix be zero! + * Condense a given matrix. The associated matrix struct should be condensed + * and compressed. It is the user's responsibility to guarantee that all + * entries in the @p condensed matrix be zero! * - * The constraint matrix object must be - * closed to call this function. + * The constraint matrix object must be closed to call this function. * * @deprecated The functions converting an uncondensed matrix into * its condensed form are deprecated. Use the functions doing the @@ -832,42 +628,30 @@ public: SparseMatrix &condensed) const DEAL_II_DEPRECATED; /** - * This function does much the same as - * the above one, except that it - * condenses the matrix 'in-place'. See - * the general documentation of this + * This function does much the same as the above one, except that it + * condenses the matrix 'in-place'. See the general documentation of this * class for more detailed information. */ template void condense (SparseMatrix &matrix) const; /** - * Same function as above, but - * condenses square block sparse - * matrices. + * Same function as above, but condenses square block sparse matrices. */ template void condense (BlockSparseMatrix &matrix) const; /** - * Condense the given vector @p - * uncondensed into @p condensed. 
It is - * the user's responsibility to - * guarantee that all entries of @p - * condensed be zero. Note that this - * function does not take any - * inhomogeneity into account and - * throws an exception in case there - * are any inhomogeneities. Use - * the function using both a matrix and - * vector for that case. - * - * The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. + * Condense the given vector @p uncondensed into @p condensed. It is the + * user's responsibility to guarantee that all entries of @p condensed be + * zero. Note that this function does not take any inhomogeneity into + * account and throws an exception in case there are any + * inhomogeneities. Use the function using both a matrix and vector for that + * case. + * + * The @p VectorType may be a Vector, Vector, + * BlockVector<...>, a PETSc or Trilinos vector wrapper class, or + * any other type having the same interface. * * @deprecated The functions converting an uncondensed matrix into * its condensed form are deprecated. Use the functions doing the @@ -878,36 +662,25 @@ public: VectorType &condensed) const DEAL_II_DEPRECATED; /** - * Condense the given vector - * in-place. The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. Note that this function - * does not take any inhomogeneity into - * account and throws an exception in - * case there are any - * inhomogeneities. Use the function - * using both a matrix and vector for - * that case. + * Condense the given vector in-place. The @p VectorType may be a + * Vector, Vector, BlockVector<...>, a PETSc or + * Trilinos vector wrapper class, or any other type having the same + * interface. Note that this function does not take any inhomogeneity into + * account and throws an exception in case there are any + * inhomogeneities. Use the function using both a matrix and vector for that + * case. */ template void condense (VectorType &vec) const; /** - * Condense a given matrix and a given - * vector. The associated matrix struct - * should be condensed and - * compressed. It is the user's - * responsibility to guarantee that all - * entries in the @p condensed matrix - * and vector be zero! This function is - * the appropriate choice for applying - * inhomogeneous constraints. + * Condense a given matrix and a given vector. The associated matrix struct + * should be condensed and compressed. It is the user's responsibility to + * guarantee that all entries in the @p condensed matrix and vector be zero! + * This function is the appropriate choice for applying inhomogeneous + * constraints. * - * The constraint matrix object must be - * closed to call this function. + * The constraint matrix object must be closed to call this function. * * @deprecated The functions converting an uncondensed matrix into * its condensed form are deprecated. Use the functions doing the @@ -920,36 +693,27 @@ public: VectorType &condensed_vector) const DEAL_II_DEPRECATED; /** - * This function does much the same as - * the above one, except that it - * condenses matrix and vector - * 'in-place'. See the general - * documentation of this class for more - * detailed information. + * This function does much the same as the above one, except that it + * condenses matrix and vector 'in-place'. See the general documentation of + * this class for more detailed information. 
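+   *
+   * A schematic usage sketch (assuming @p system_matrix and @p system_rhs
+   * have been assembled without regard to the constraints, and @p solution
+   * denotes the solution vector):
+   * @code
+   *   constraints.close ();
+   *   constraints.condense (system_matrix, system_rhs);
+   *   // ... solve the condensed linear system ...
+   *   constraints.distribute (solution);
+   * @endcode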
*/ template void condense (SparseMatrix &matrix, VectorType &vector) const; /** - * Same function as above, but - * condenses square block sparse - * matrices and vectors. + * Same function as above, but condenses square block sparse matrices and + * vectors. */ template void condense (BlockSparseMatrix &matrix, BlockVectorType &vector) const; /** - * Sets the values of all constrained - * DoFs in a vector to zero. - * The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a - * PETSc or Trilinos vector - * wrapper class, or any other - * type having the same - * interface. + * Sets the values of all constrained DoFs in a vector to zero. The @p + * VectorType may be a Vector, Vector, + * BlockVector<...>, a PETSc or Trilinos vector wrapper class, or + * any other type having the same interface. */ template void set_zero (VectorType &vec) const; @@ -964,59 +728,40 @@ public: */ /** - * This function takes a vector of - * local contributions (@p - * local_vector) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global vector. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as @p - * local_vector and @p - * local_dof_indices have the same - * number of elements, this function is - * happy with whatever it is - * given. - * - * In contrast to the similar function - * in the DoFAccessor class, this - * function also takes care of - * constraints, i.e. if one of the - * elements of @p local_dof_indices - * belongs to a constrained node, then - * rather than writing the - * corresponding element of @p - * local_vector into @p global_vector, - * the element is distributed to the - * entries in the global vector to - * which this particular degree of - * freedom is constrained. - * - * Thus, by using this function to - * distribute local contributions to the - * global object, one saves the call to - * the condense function after the - * vectors and matrices are fully - * assembled. On the other hand, by - * consequence, the function does not - * only write into the entries enumerated - * by the @p local_dof_indices array, but - * also (possibly) others as necessary. - * - * Note that this function will apply all - * constraints as if they were - * homogeneous. For correctly setting - * inhomogeneous constraints, use the - * similar function with a matrix - * argument or the function with both - * matrix and vector arguments. - * - * @note This function is not - * thread-safe, so you will need to make - * sure that only one process at a time - * calls this function. + * This function takes a vector of local contributions (@p local_vector) + * corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global vector. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as @p local_vector and @p + * local_dof_indices have the same number of elements, this function is + * happy with whatever it is given. + * + * In contrast to the similar function in the DoFAccessor class, this + * function also takes care of constraints, i.e. 
if one of the elements of + * @p local_dof_indices belongs to a constrained node, then rather than + * writing the corresponding element of @p local_vector into @p + * global_vector, the element is distributed to the entries in the global + * vector to which this particular degree of freedom is constrained. + * + * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. On the other hand, by + * consequence, the function does not only write into the entries enumerated + * by the @p local_dof_indices array, but also (possibly) others as + * necessary. + * + * Note that this function will apply all constraints as if they were + * homogeneous. For correctly setting inhomogeneous constraints, use the + * similar function with a matrix argument or the function with both matrix + * and vector arguments. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global vector allows + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. */ template void @@ -1025,75 +770,47 @@ public: OutVector &global_vector) const; /** - * This function takes a vector of - * local contributions (@p - * local_vector) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global vector. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as @p - * local_vector and @p - * local_dof_indices have the same - * number of elements, this function is - * happy with whatever it is - * given. - * - * In contrast to the similar function in - * the DoFAccessor class, this function - * also takes care of constraints, - * i.e. if one of the elements of @p - * local_dof_indices belongs to a - * constrained node, then rather than - * writing the corresponding element of - * @p local_vector into @p global_vector, - * the element is distributed to the - * entries in the global vector to which - * this particular degree of freedom is - * constrained. - * - * Thus, by using this function to - * distribute local contributions to the - * global object, one saves the call to - * the condense function after the - * vectors and matrices are fully - * assembled. On the other hand, by - * consequence, the function does not - * only write into the entries enumerated - * by the @p local_dof_indices array, but - * also (possibly) others as - * necessary. This includes writing into - * diagonal elements of the matrix if the - * corresponding degree of freedom is - * constrained. + * This function takes a vector of local contributions (@p local_vector) + * corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global vector. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as @p local_vector and @p + * local_dof_indices have the same number of elements, this function is + * happy with whatever it is given. 
* - * The fourth argument - * local_matrix is intended to - * be used in case one wants to apply - * inhomogeneous constraints on the - * vector only. Such a situation could be - * where one wants to assemble of a right - * hand side vector on a problem with - * inhomogeneous constraints, but the - * global matrix has been assembled - * previously. A typical example of this - * is a time stepping algorithm where the - * stiffness matrix is assembled once, - * and the right hand side updated every - * time step. Note that, however, the - * entries in the columns of the local - * matrix have to be exactly the same as - * those that have been written into the - * global matrix. Otherwise, this - * function will not be able to correctly - * handle inhomogeneities. - * - * @note This function is not - * thread-safe, so you will need to make - * sure that only one process at a time - * calls this function. + * In contrast to the similar function in the DoFAccessor class, this + * function also takes care of constraints, i.e. if one of the elements of + * @p local_dof_indices belongs to a constrained node, then rather than + * writing the corresponding element of @p local_vector into @p + * global_vector, the element is distributed to the entries in the global + * vector to which this particular degree of freedom is constrained. + * + * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. On the other hand, by + * consequence, the function does not only write into the entries enumerated + * by the @p local_dof_indices array, but also (possibly) others as + * necessary. This includes writing into diagonal elements of the matrix if + * the corresponding degree of freedom is constrained. + * + * The fourth argument local_matrix is intended to be used in case + * one wants to apply inhomogeneous constraints on the vector only. Such a + * situation could be where one wants to assemble of a right hand side + * vector on a problem with inhomogeneous constraints, but the global matrix + * has been assembled previously. A typical example of this is a time + * stepping algorithm where the stiffness matrix is assembled once, and the + * right hand side updated every time step. Note that, however, the entries + * in the columns of the local matrix have to be exactly the same as those + * that have been written into the global matrix. Otherwise, this function + * will not be able to correctly handle inhomogeneities. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global vector allows + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. */ template void @@ -1103,8 +820,7 @@ public: const FullMatrix &local_matrix) const; /** - * Enter a single value into a - * result vector, obeying constraints. + * Enter a single value into a result vector, obeying constraints. 
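+   *
+   * For illustration only (the index and value are arbitrary, and @p
+   * system_rhs is assumed to be a vector of matching size):
+   * @code
+   *   constraints.distribute_local_to_global (42, 1.0, system_rhs);
+   * @endcode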
*/ template void @@ -1113,45 +829,32 @@ public: VectorType &global_vector) const; /** - * This function takes a pointer to a - * vector of local contributions (@p - * local_vector) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global vector. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as the - * entries in @p local_dof_indices - * indicate reasonable global vector - * entries, this function is happy with - * whatever it is given. - * - * If one of the elements of @p - * local_dof_indices belongs to a - * constrained node, then rather than - * writing the corresponding element of - * @p local_vector into @p - * global_vector, the element is - * distributed to the entries in the - * global vector to which this - * particular degree of freedom is - * constrained. - * - * Thus, by using this function to - * distribute local contributions to - * the global object, one saves the - * call to the condense function after - * the vectors and matrices are fully - * assembled. Note that this function - * completely ignores inhomogeneous - * constraints. - * - * @note This function is not - * thread-safe, so you will need to - * make sure that only one process at a - * time calls this function. + * This function takes a pointer to a vector of local contributions (@p + * local_vector) corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global vector. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as the entries in @p + * local_dof_indices indicate reasonable global vector entries, this + * function is happy with whatever it is given. + * + * If one of the elements of @p local_dof_indices belongs to a constrained + * node, then rather than writing the corresponding element of @p + * local_vector into @p global_vector, the element is distributed to the + * entries in the global vector to which this particular degree of freedom + * is constrained. + * + * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. Note that this function + * completely ignores inhomogeneous constraints. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global vector allows + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. */ template @@ -1162,71 +865,45 @@ public: VectorType &global_vector) const; /** - * This function takes a matrix of - * local contributions (@p - * local_matrix) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global matrix. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. 
However, as long as @p - * local_matrix and @p - * local_dof_indices have the same - * number of elements, this function is + * This function takes a matrix of local contributions (@p local_matrix) + * corresponding to the degrees of freedom indices given in @p + * local_dof_indices and distributes them to the global matrix. In most + * cases, these local contributions will be the result of an integration + * over a cell or face of a cell. However, as long as @p local_matrix and @p + * local_dof_indices have the same number of elements, this function is * happy with whatever it is given. * - * In contrast to the similar function - * in the DoFAccessor class, this - * function also takes care of - * constraints, i.e. if one of the - * elements of @p local_dof_indices - * belongs to a constrained node, then - * rather than writing the - * corresponding element of @p - * local_matrix into @p global_matrix, - * the element is distributed to the - * entries in the global matrix to - * which this particular degree of - * freedom is constrained. - * - * With this scheme, we never write - * into rows or columns of constrained - * degrees of freedom. In order to make - * sure that the resulting matrix can - * still be inverted, we need to do - * something with the diagonal elements - * corresponding to constrained - * nodes. Thus, if a degree of freedom - * in @p local_dof_indices is - * constrained, we distribute the - * corresponding entries in the matrix, - * but also add the absolute value of - * the diagonal entry of the local - * matrix to the corresponding entry in - * the global matrix. Since the exact - * value of the diagonal element is not - * important (the value of the - * respective degree of freedom will be - * overwritten by the distribute() call - * later on anyway), this guarantees - * that the diagonal entry is always - * non-zero, positive, and of the same - * order of magnitude as the other + * In contrast to the similar function in the DoFAccessor class, this + * function also takes care of constraints, i.e. if one of the elements of + * @p local_dof_indices belongs to a constrained node, then rather than + * writing the corresponding element of @p local_matrix into @p + * global_matrix, the element is distributed to the entries in the global + * matrix to which this particular degree of freedom is constrained. + * + * With this scheme, we never write into rows or columns of constrained + * degrees of freedom. In order to make sure that the resulting matrix can + * still be inverted, we need to do something with the diagonal elements + * corresponding to constrained nodes. Thus, if a degree of freedom in @p + * local_dof_indices is constrained, we distribute the corresponding entries + * in the matrix, but also add the absolute value of the diagonal entry of + * the local matrix to the corresponding entry in the global matrix. Since + * the exact value of the diagonal element is not important (the value of + * the respective degree of freedom will be overwritten by the distribute() + * call later on anyway), this guarantees that the diagonal entry is always + * non-zero, positive, and of the same order of magnitude as the other * entries of the matrix. * - * Thus, by using this function to - * distribute local contributions to - * the global object, one saves the - * call to the condense function after - * the vectors and matrices are fully - * assembled. 
+ * Thus, by using this function to distribute local contributions to the + * global object, one saves the call to the condense function after the + * vectors and matrices are fully assembled. * - * @note This function is not - * thread-safe, so you will need to - * make sure that only one process at a - * time calls this function. + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global matrix allows + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. */ template void @@ -1235,9 +912,7 @@ public: MatrixType &global_matrix) const; /** - * Does the same as the function - * above but can treat non - * quadratic matrices. + * Does the same as the function above but can treat non quadratic matrices. */ template void @@ -1247,22 +922,19 @@ public: MatrixType &global_matrix) const; /** - * This function simultaneously - * writes elements into matrix - * and vector, according to the - * constraints specified by the - * calling ConstraintMatrix. This - * function can correctly handle - * inhomogeneous constraints as - * well. For the parameter - * use_inhomogeneities_for_rhs - * see the documentation in @ref - * constraints module. + * This function simultaneously writes elements into matrix and vector, + * according to the constraints specified by the calling + * ConstraintMatrix. This function can correctly handle inhomogeneous + * constraints as well. For the parameter use_inhomogeneities_for_rhs see + * the documentation in @ref constraints module. * - * @note This function is not - * thread-safe, so you will need to - * make sure that only one process at a - * time calls this function. + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global matrix and vector allow + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. */ template void @@ -1274,97 +946,67 @@ public: bool use_inhomogeneities_for_rhs = false) const; /** - * Do a similar operation as the - * distribute_local_to_global() function - * that distributes writing entries into - * a matrix for constrained degrees of - * freedom, except that here we don't - * write into a matrix but only allocate + * Do a similar operation as the distribute_local_to_global() function that + * distributes writing entries into a matrix for constrained degrees of + * freedom, except that here we don't write into a matrix but only allocate * sparsity pattern entries. * - * As explained in the - * @ref hp_paper "hp paper" - * and in step-27, - * first allocating a sparsity pattern - * and later coming back and allocating - * additional entries for those matrix - * entries that will be written to due to - * the elimination of constrained degrees - * of freedom (using - * ConstraintMatrix::condense() ), can be - * a very expensive procedure. It is - * cheaper to allocate these entries - * right away without having to do a - * second pass over the sparsity pattern - * object. 
This function does exactly + * As explained in the @ref hp_paper "hp paper" and in step-27, first + * allocating a sparsity pattern and later coming back and allocating + * additional entries for those matrix entries that will be written to due + * to the elimination of constrained degrees of freedom (using + * ConstraintMatrix::condense() ), can be a very expensive procedure. It is + * cheaper to allocate these entries right away without having to do a + * second pass over the sparsity pattern object. This function does exactly * that. * - * Because the function only allocates - * entries in a sparsity pattern, all it - * needs to know are the degrees of - * freedom that couple to each - * other. Unlike the previous function, - * no actual values are written, so the - * second input argument is not necessary - * here. - * - * The third argument to this function, - * keep_constrained_entries determines - * whether the function shall allocate - * entries in the sparsity pattern at - * all for entries that will later be - * set to zero upon condensation of the - * matrix. These entries are necessary - * if the matrix is built - * unconstrained, and only later - * condensed. They are not necessary if - * the matrix is built using the - * distribute_local_to_global() - * function of this class which - * distributes entries right away when - * copying a local matrix into a global - * object. The default of this argument - * is true, meaning to allocate the few - * entries that may later be set to - * zero. - * - * By default, the function adds - * entries for all pairs of indices - * given in the first argument to the - * sparsity pattern (unless - * keep_constrained_entries is - * false). However, sometimes one would - * like to only add a subset of all of - * these pairs. In that case, the last - * argument can be used which specifies - * a boolean mask which of the pairs of - * indices should be considered. If the - * mask is false for a pair of indices, - * then no entry will be added to the - * sparsity pattern for this pair, - * irrespective of whether one or both - * of the indices correspond to - * constrained degrees of freedom. - * - * This function is not typically called - * from user code, but is used in the - * DoFTools::make_sparsity_pattern() - * function when passed a constraint + * Because the function only allocates entries in a sparsity pattern, all it + * needs to know are the degrees of freedom that couple to each + * other. Unlike the previous function, no actual values are written, so the + * second input argument is not necessary here. + * + * The third argument to this function, keep_constrained_entries determines + * whether the function shall allocate entries in the sparsity pattern at + * all for entries that will later be set to zero upon condensation of the + * matrix. These entries are necessary if the matrix is built unconstrained, + * and only later condensed. They are not necessary if the matrix is built + * using the distribute_local_to_global() function of this class which + * distributes entries right away when copying a local matrix into a global + * object. The default of this argument is true, meaning to allocate the few + * entries that may later be set to zero. + * + * By default, the function adds entries for all pairs of indices given in + * the first argument to the sparsity pattern (unless + * keep_constrained_entries is false). However, sometimes one would like to + * only add a subset of all of these pairs. 
In that case, the last argument + * can be used which specifies a boolean mask which of the pairs of indices + * should be considered. If the mask is false for a pair of indices, then no + * entry will be added to the sparsity pattern for this pair, irrespective + * of whether one or both of the indices correspond to constrained degrees + * of freedom. + * + * This function is not typically called from user code, but is used in the + * DoFTools::make_sparsity_pattern() function when passed a constraint * matrix object. + * + * @note This function in itself is thread-safe, i.e., it works properly + * also when several threads call it simultaneously. However, the function + * call is only thread-safe if the underlying global sparsity pattern allows + * for simultaneous access and the access is not to rows with the same + * global index at the same time. This needs to be made sure from the + * caller's site. There is no locking mechanism inside this method to + * prevent data races. */ template void add_entries_local_to_global (const std::vector &local_dof_indices, SparsityType &sparsity_pattern, const bool keep_constrained_entries = true, - const Table<2,bool> &dof_mask = default_empty_table) const; + const Table<2,bool> &dof_mask = default_empty_table) const; /** - * Similar to the other function, - * but for non-quadratic sparsity - * patterns. + * Similar to the other function, but for non-quadratic sparsity patterns. */ - template void add_entries_local_to_global (const std::vector &row_indices, @@ -1374,38 +1016,23 @@ public: const Table<2,bool> &dof_mask = default_empty_table) const; /** - * This function imports values from a - * global vector (@p global_vector) by - * applying the constraints to a vector - * of local values, expressed in - * iterator format. In most cases, the - * local values will be identified by - * the local dof values on a - * cell. However, as long as the - * entries in @p local_dof_indices - * indicate reasonable global vector - * entries, this function is happy with - * whatever it is given. - * - * If one of the elements of @p - * local_dof_indices belongs to a - * constrained node, then rather than - * writing the corresponding element of - * @p global_vector into @p - * local_vector, the constraints are - * resolved as the respective - * distribute function does, i.e., the - * local entry is constructed from the - * global entries to which this - * particular degree of freedom is + * This function imports values from a global vector (@p global_vector) by + * applying the constraints to a vector of local values, expressed in + * iterator format. In most cases, the local values will be identified by + * the local dof values on a cell. However, as long as the entries in @p + * local_dof_indices indicate reasonable global vector entries, this + * function is happy with whatever it is given. + * + * If one of the elements of @p local_dof_indices belongs to a constrained + * node, then rather than writing the corresponding element of @p + * global_vector into @p local_vector, the constraints are resolved as the + * respective distribute function does, i.e., the local entry is constructed + * from the global entries to which this particular degree of freedom is * constrained. * - * In contrast to the similar function - * get_dof_values in the DoFAccessor - * class, this function does not need - * the constrained values to be - * correctly set (i.e., distribute to - * be called). 
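In the same spirit, a hedged sketch of how add_entries_local_to_global() is used to build a sparsity pattern up front; dof_handler, constraints and sparsity_pattern are again assumed to exist, and DoFTools::make_sparsity_pattern() performs essentially this loop when handed a ConstraintMatrix:

  CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
                                       dof_handler.n_dofs());
  std::vector<types::global_dof_index> local_dof_indices;
  for (typename DoFHandler<dim>::active_cell_iterator
         cell = dof_handler.begin_active(); cell != dof_handler.end(); ++cell)
    {
      local_dof_indices.resize (cell->get_fe().dofs_per_cell);
      cell->get_dof_indices (local_dof_indices);

      // allocate entries, including those created by eliminating constraints,
      // so that no second pass over the sparsity pattern is needed later
      constraints.add_entries_local_to_global (local_dof_indices, csp,
                                               /*keep_constrained_entries=*/ false);
    }
  sparsity_pattern.copy_from (csp);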
+ * In contrast to the similar function get_dof_values in the DoFAccessor + * class, this function does not need the constrained values to be correctly + * set (i.e., distribute to be called). */ template @@ -1425,41 +1052,29 @@ public: */ /** - * Re-distribute the elements of the - * vector @p condensed to @p - * uncondensed. It is the user's - * responsibility to guarantee that all + * Re-distribute the elements of the vector @p condensed to @p + * uncondensed. It is the user's responsibility to guarantee that all * entries of @p uncondensed be zero! * - * This function undoes the action of - * @p condense somehow, but it should - * be noted that it is not the inverse - * of @p condense. + * This function undoes the action of @p condense somehow, but it should be + * noted that it is not the inverse of @p condense. * - * The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. + * The @p VectorType may be a Vector, Vector, + * BlockVector<...>, a PETSc or Trilinos vector wrapper class, or + * any other type having the same interface. */ template void distribute (const VectorType &condensed, VectorType &uncondensed) const; /** - * Re-distribute the elements of the - * vector in-place. The @p VectorType - * may be a Vector, - * Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same + * Re-distribute the elements of the vector in-place. The @p VectorType may + * be a Vector, Vector, BlockVector<...>, a PETSc or + * Trilinos vector wrapper class, or any other type having the same * interface. * - * Note that if called with a - * TrilinosWrappers::MPI::Vector it may - * not contain ghost elements. + * Note that if called with a TrilinosWrappers::MPI::Vector it may not + * contain ghost elements. */ template void distribute (VectorType &vec) const; @@ -1563,35 +1178,27 @@ public: private: /** - * This class represents one line of a - * constraint matrix. + * This class represents one line of a constraint matrix. */ struct ConstraintLine { /** - * A data type in which we store the list - * of entries that make up the homogenous - * part of a constraint. + * A data type in which we store the list of entries that make up the + * homogenous part of a constraint. */ typedef std::vector > Entries; /** - * Number of this line. Since only - * very few lines are stored, we - * can not assume a specific order - * and have to store the line - * number explicitly. + * Number of this line. Since only very few lines are stored, we can not + * assume a specific order and have to store the line number explicitly. */ size_type line; /** - * Row numbers and values of the - * entries in this line. + * Row numbers and values of the entries in this line. * - * For the reason why we use a - * vector instead of a map and the - * consequences thereof, the same - * applies as what is said for + * For the reason why we use a vector instead of a map and the + * consequences thereof, the same applies as what is said for * ConstraintMatrix::lines. */ Entries entries; @@ -1602,159 +1209,177 @@ private: double inhomogeneity; /** - * This operator is a bit weird and - * unintuitive: it compares the - * line numbers of two lines. We - * need this to sort the lines; in - * fact we could do this using a - * comparison predicate. 
However, - * this way, it is easier, albeit - * unintuitive since two lines - * really have no god-given order + * This operator is a bit weird and unintuitive: it compares the line + * numbers of two lines. We need this to sort the lines; in fact we could + * do this using a comparison predicate. However, this way, it is easier, + * albeit unintuitive since two lines really have no god-given order * relation. */ bool operator < (const ConstraintLine &) const; /** - * This operator is likewise weird: - * it checks whether the line - * indices of the two operands are - * equal, irrespective of the fact - * that the contents of the line - * may be different. + * This operator is likewise weird: it checks whether the line indices of + * the two operands are equal, irrespective of the fact that the contents + * of the line may be different. */ bool operator == (const ConstraintLine &) const; /** - * Determine an estimate for the - * memory consumption (in bytes) of - * this object. + * Determine an estimate for the memory consumption (in bytes) of this + * object. */ std::size_t memory_consumption () const; }; /** - * Store the lines of the matrix. - * Entries are usually appended in an - * arbitrary order and insertion into a - * vector is done best at the end, so - * the order is unspecified after all - * entries are inserted. Sorting of the - * entries takes place when calling the - * close() function. + * Store the lines of the matrix. Entries are usually appended in an + * arbitrary order and insertion into a vector is done best at the end, so + * the order is unspecified after all entries are inserted. Sorting of the + * entries takes place when calling the close() function. * - * We could, instead of using a vector, - * use an associative array, like a map - * to store the lines. This, however, - * would mean a much more fractioned - * heap since it allocates many small - * objects, and would additionally make - * usage of this matrix much slower. + * We could, instead of using a vector, use an associative array, like a map + * to store the lines. This, however, would mean a much more fractioned heap + * since it allocates many small objects, and would additionally make usage + * of this matrix much slower. */ std::vector lines; /** - * A list of size_type that - * contains the position of the - * ConstraintLine of a constrained degree - * of freedom, or - * numbers::invalid_size_type if the - * degree of freedom is not - * constrained. The - * numbers::invalid_size_type - * return value returns thus whether - * there is a constraint line for a given - * degree of freedom index. Note that - * this class has no notion of how many - * degrees of freedom there really are, - * so if we check whether there is a - * constraint line for a given degree of - * freedom, then this vector may actually - * be shorter than the index of the DoF - * we check for. - * - * This field exists since when adding a - * new constraint line we have to figure - * out whether it already - * exists. Previously, we would simply - * walk the unsorted list of constraint - * lines until we either hit the end or - * found it. This algorithm is O(N) if N - * is the number of constraints, which - * makes it O(N^2) when inserting all - * constraints. For large problems with - * many constraints, this could easily - * take 5-10 per cent of the total run - * time. 
With this field, we can save - * this time since we find any constraint - * in O(1) time or get to know that it a - * certain degree of freedom is not + * A list of size_type that contains the position of the ConstraintLine of a + * constrained degree of freedom, or numbers::invalid_size_type if the + * degree of freedom is not constrained. The numbers::invalid_size_type + * return value returns thus whether there is a constraint line for a given + * degree of freedom index. Note that this class has no notion of how many + * degrees of freedom there really are, so if we check whether there is a + * constraint line for a given degree of freedom, then this vector may + * actually be shorter than the index of the DoF we check for. + * + * This field exists since when adding a new constraint line we have to + * figure out whether it already exists. Previously, we would simply walk + * the unsorted list of constraint lines until we either hit the end or + * found it. This algorithm is O(N) if N is the number of constraints, which + * makes it O(N^2) when inserting all constraints. For large problems with + * many constraints, this could easily take 5-10 per cent of the total run + * time. With this field, we can save this time since we find any constraint + * in O(1) time or get to know that it a certain degree of freedom is not * constrained. * - * To make things worse, traversing the - * list of existing constraints requires - * reads from many different places in - * memory. Thus, in large 3d - * applications, the add_line() function - * showed up very prominently in the - * overall compute time, mainly because - * it generated a lot of cache - * misses. This should also be fixed by - * using the O(1) algorithm to access the - * fields of this array. - * - * The field is useful in a number of - * other contexts as well, e.g. when one - * needs random access to the constraints - * as in all the functions that apply - * constraints on the fly while add cell - * contributions into vectors and + * To make things worse, traversing the list of existing constraints + * requires reads from many different places in memory. Thus, in large 3d + * applications, the add_line() function showed up very prominently in the + * overall compute time, mainly because it generated a lot of cache + * misses. This should also be fixed by using the O(1) algorithm to access + * the fields of this array. + * + * The field is useful in a number of other contexts as well, e.g. when one + * needs random access to the constraints as in all the functions that apply + * constraints on the fly while add cell contributions into vectors and * matrices. */ std::vector lines_cache; /** - * This IndexSet is used to limit the - * lines to save in the ContraintMatrix - * to a subset. This is necessary, - * because the lines_cache vector would - * become too big in a distributed - * calculation. + * This IndexSet is used to limit the lines to save in the ConstraintMatrix + * to a subset. This is necessary, because the lines_cache vector would + * become too big in a distributed calculation. */ IndexSet local_lines; /** - * Store whether the arrays are sorted. - * If so, no new entries can be added. + * Store whether the arrays are sorted. If so, no new entries can be added. */ bool sorted; /** - * Internal function to calculate the - * index of line @p line in the vector + * Scratch data that is used during calls to distribute_local_to_global and + * add_entries_local_to_global. 
In order to avoid frequent memory + * allocation, we keep the data alive from one call to the next. + */ + struct ScratchData + { + /** + * Constructor, does nothing. + */ + ScratchData () : + in_use (false) + {} + + /** + * Copy constructor, does nothing + */ + ScratchData (const ScratchData &) : + in_use (false) + {} + + /** + * Stores whether the data is currently in use. + */ + bool in_use; + + /** + * Temporary array for column indices + */ + std::vector columns; + + /** + * Temporary array for column values + */ + std::vector values; + + /** + * Temporary array for block start indices + */ + std::vector block_starts; + + /** + * Temporary array for vector indices + */ + std::vector vector_indices; + + /** + * Data array for reorder row/column indices. Use a shared ptr to + * global_rows to avoid defining in the .h file + */ + std_cxx1x::shared_ptr global_rows; + + /** + * Data array for reorder row/column indices. Use a shared ptr to + * global_rows to avoid defining in the .h file + */ + std_cxx1x::shared_ptr global_columns; + }; + + /** + * Here comes the actual data structure for the scratch data. It is made + * mutable since it is modified in a const function. Since only one thread + * can access it at a time, no conflicting access can occur. For this to be + * valid, we need to make sure that no call within + * distribute_local_to_global is made that by itself can spawn + * tasks. Otherwise, we might end up in a situation where several threads + * fight for the data. + */ + mutable Threads::ThreadLocalStorage scratch_data; + + /** + * Internal function to calculate the index of line @p line in the vector * lines_cache using local_lines. */ size_type calculate_line_index (const size_type line) const; /** - * Return @p true if the weight of an - * entry (the second element of the - * pair) equals zero. This function is - * used to delete entries with zero - * weight. + * Return @p true if the weight of an entry (the second element of the pair) + * equals zero. This function is used to delete entries with zero weight. */ static bool check_zero_weight (const std::pair &p); /** - * Dummy table that serves as default - * argument for function + * Dummy table that serves as default argument for function * add_entries_local_to_global(). */ static const Table<2,bool> default_empty_table; /** - * This function actually implements - * the local_to_global function for + * This function actually implements the local_to_global function for * standard (non-block) matrices. */ template @@ -1768,9 +1393,8 @@ private: internal::bool2type) const; /** - * This function actually implements - * the local_to_global function for - * block matrices. + * This function actually implements the local_to_global function for block + * matrices. */ template void @@ -1783,8 +1407,7 @@ private: internal::bool2type) const; /** - * This function actually implements - * the local_to_global function for + * This function actually implements the local_to_global function for * standard (non-block) sparsity types. */ template @@ -1796,9 +1419,8 @@ private: internal::bool2type) const; /** - * This function actually implements - * the local_to_global function for - * block sparsity types. + * This function actually implements the local_to_global function for block + * sparsity types. */ template void @@ -1809,27 +1431,21 @@ private: internal::bool2type) const; /** - * Internal helper function for - * distribute_local_to_global function. + * Internal helper function for distribute_local_to_global function. 
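The intended access pattern for this scratch data, in condensed form (the actual code is in constraint_matrix.templates.h further down in this patch): each caller fetches its thread-local copy, flags it as in use for the duration of the call, and releases it before returning. Note that the in_use flag only detects re-entrant use on the same thread, e.g. if a function called from within distribute_local_to_global() were to spawn tasks; it does not prevent it.

  ScratchData &my_scratch_data = scratch_data.get ();   // per-thread instance
  Assert (my_scratch_data.in_use == false,
          ExcMessage ("Access to thread-local scratch data tried, but it is "
                      "already in use"));
  my_scratch_data.in_use = true;

  // ... reuse my_scratch_data.columns, values, block_starts, global_rows ...

  my_scratch_data.in_use = false;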
* - * Creates a list of affected global rows - * for distribution, including the local - * rows where the entries come from. The - * list is sorted according to the global - * row indices. + * Creates a list of affected global rows for distribution, including the + * local rows where the entries come from. The list is sorted according to + * the global row indices. */ void make_sorted_row_list (const std::vector &local_dof_indices, internals::GlobalRowsFromLocal &global_rows) const; /** - * Internal helper function for - * add_entries_local_to_global function. + * Internal helper function for add_entries_local_to_global function. * - * Creates a list of affected rows for - * distribution without any additional - * information, otherwise similar to the - * other make_sorted_row_list() + * Creates a list of affected rows for distribution without any additional + * information, otherwise similar to the other make_sorted_row_list() * function. */ void @@ -1837,8 +1453,7 @@ private: std::vector &active_dofs) const; /** - * Internal helper function for - * distribute_local_to_global function. + * Internal helper function for distribute_local_to_global function. */ double resolve_vector_entry (const size_type i, @@ -1857,7 +1472,8 @@ ConstraintMatrix::ConstraintMatrix (const IndexSet &local_constraints) : lines (), local_lines (local_constraints), - sorted (false) + sorted (false), + scratch_data (ScratchData()) { // make sure the IndexSet is compressed. Otherwise this can lead to crashes // that are hard to find (only happen in release mode). @@ -1874,7 +1490,8 @@ ConstraintMatrix::ConstraintMatrix (const ConstraintMatrix &constraint_matrix) lines (constraint_matrix.lines), lines_cache (constraint_matrix.lines_cache), local_lines (constraint_matrix.local_lines), - sorted (constraint_matrix.sorted) + sorted (constraint_matrix.sorted), + scratch_data (ScratchData()) {} @@ -1884,34 +1501,25 @@ ConstraintMatrix::add_line (const size_type line) { Assert (sorted==false, ExcMatrixIsClosed()); - // the following can happen when we - // compute with distributed meshes - // and dof handlers and we - // constrain a degree of freedom - // whose number we don't have - // locally. if we don't abort here - // the program will try to allocate - // several terabytes of memory to - // resize the various arrays below - // :-) + // the following can happen when we compute with distributed meshes and dof + // handlers and we constrain a degree of freedom whose number we don't have + // locally. 
if we don't abort here the program will try to allocate several + // terabytes of memory to resize the various arrays below :-) Assert (line != numbers::invalid_size_type, ExcInternalError()); const size_type line_index = calculate_line_index (line); - // check whether line already exists; it - // may, in which case we can just quit + // check whether line already exists; it may, in which case we can just quit if (is_constrained(line)) return; - // if necessary enlarge vector of - // existing entries for cache + // if necessary enlarge vector of existing entries for cache if (line_index >= lines_cache.size()) lines_cache.resize (std::max(2*static_cast(lines_cache.size()), line_index+1), numbers::invalid_size_type); - // push a new line to the end of the - // list + // push a new line to the end of the list lines.push_back (ConstraintLine()); lines.back().line = line; lines.back().inhomogeneity = 0.; @@ -1930,14 +1538,11 @@ ConstraintMatrix::add_entry (const size_type line, Assert (line != column, ExcMessage ("Can't constrain a degree of freedom to itself")); - // if in debug mode, check whether an - // entry for this column already - // exists and if it's the same as - // the one entered at present + // if in debug mode, check whether an entry for this column already exists + // and if it's the same as the one entered at present // - // in any case: exit the function if an - // entry for this column already exists, - // since we don't want to enter it twice + // in any case: exit the function if an entry for this column already + // exists, since we don't want to enter it twice Assert (lines_cache[calculate_line_index(line)] != numbers::invalid_size_type, ExcInternalError()); ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]]; @@ -1998,8 +1603,7 @@ inline bool ConstraintMatrix::is_inhomogeneously_constrained (const size_type index) const { - // check whether the entry is - // constrained. could use is_constrained, but + // check whether the entry is constrained. could use is_constrained, but // that means computing the line index twice const size_type line_index = calculate_line_index(index); if (line_index >= lines_cache.size() || @@ -2018,8 +1622,7 @@ inline const std::vector > * ConstraintMatrix::get_constraint_entries (const size_type line) const { - // check whether the entry is - // constrained. could use is_constrained, but + // check whether the entry is constrained. could use is_constrained, but // that means computing the line index twice const size_type line_index = calculate_line_index(line); if (line_index >= lines_cache.size() || @@ -2035,8 +1638,7 @@ inline double ConstraintMatrix::get_inhomogeneity (const size_type line) const { - // check whether the entry is - // constrained. could use is_constrained, but + // check whether the entry is constrained. could use is_constrained, but // that means computing the line index twice const size_type line_index = calculate_line_index(line); if (line_index >= lines_cache.size() || @@ -2182,8 +1784,7 @@ distribute_local_to_global (const FullMatrix &local_matrix, const std::vector &local_dof_indices, MatrixType &global_matrix) const { - // create a dummy and hand on to the - // function actually implementing this + // create a dummy and hand on to the function actually implementing this // feature in the cm.templates.h file. 
Vector dummy(0); distribute_local_to_global (local_matrix, dummy, local_dof_indices, @@ -2204,10 +1805,8 @@ distribute_local_to_global (const FullMatrix &local_matrix, VectorType &global_vector, bool use_inhomogeneities_for_rhs) const { - // enter the internal function with the - // respective block information set, the - // actual implementation follows in the - // cm.templates.h file. + // enter the internal function with the respective block information set, + // the actual implementation follows in the cm.templates.h file. distribute_local_to_global (local_matrix, local_vector, local_dof_indices, global_matrix, global_vector, use_inhomogeneities_for_rhs, internal::bool2type::value>()); @@ -2224,10 +1823,8 @@ add_entries_local_to_global (const std::vector &local_dof_indices, const bool keep_constrained_entries, const Table<2,bool> &dof_mask) const { - // enter the internal function with the - // respective block information set, the - // actual implementation follows in the - // cm.templates.h file. + // enter the internal function with the respective block information set, + // the actual implementation follows in the cm.templates.h file. add_entries_local_to_global (local_dof_indices, sparsity_pattern, keep_constrained_entries, dof_mask, internal::bool2type::value>()); diff --git a/deal.II/include/deal.II/lac/constraint_matrix.templates.h b/deal.II/include/deal.II/lac/constraint_matrix.templates.h index 90aa944bfd..44dd38ae4b 100644 --- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h +++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h @@ -1289,10 +1289,12 @@ namespace internals void reinit () { - Assert (element_size == 0, ExcInternalError()); - element_size = 6; - data = new std::pair [20*6]; - individual_size.resize(20); + if (element_size == 0) + { + element_size = 6; + data = new std::pair [20*6]; + individual_size.resize(20); + } n_used_elements = 0; } @@ -1388,13 +1390,21 @@ namespace internals class GlobalRowsFromLocal { public: - GlobalRowsFromLocal (const size_type n_local_rows) + GlobalRowsFromLocal () : - total_row_indices (n_local_rows), - n_active_rows (n_local_rows), + n_active_rows (0), n_inhomogeneous_rows (0) {} + void reinit (const size_type n_local_rows) + { + total_row_indices.resize(n_local_rows); + for (unsigned int i=0; i inline void set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows, - const std::vector &local_dof_indices, + const std::vector &local_dof_indices, const Table<2,bool> &dof_mask, const bool keep_constrained_entries, SparsityType &sparsity_pattern) @@ -2187,8 +2199,8 @@ add_this_index: // are related to it. 
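The inline wrappers above select the block or non-block implementation through the usual bool2type tag-dispatch idiom. A minimal, self-contained sketch of that idiom follows; all names are illustrative stand-ins rather than deal.II's own:

  template <bool B> struct bool2type {};

  // compile-time predicate; specializations for block matrix types
  // would set value = true
  template <typename T> struct is_block_matrix { static const bool value = false; };

  template <typename Matrix> void add_impl (Matrix &, bool2type<false>) { /* plain path */ }
  template <typename Matrix> void add_impl (Matrix &, bool2type<true>)  { /* block path */ }

  template <typename Matrix>
  void add (Matrix &m)
  {
    // the correct overload is chosen at compile time
    add_impl (m, bool2type<is_block_matrix<Matrix>::value>());
  }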
void ConstraintMatrix:: -make_sorted_row_list (const std::vector &local_dof_indices, - internals::GlobalRowsFromLocal &global_rows) const +make_sorted_row_list (const std::vector &local_dof_indices, + internals::GlobalRowsFromLocal &global_rows) const { const size_type n_local_dofs = local_dof_indices.size(); AssertDimension (n_local_dofs, global_rows.size()); @@ -2377,21 +2389,41 @@ ConstraintMatrix::distribute_local_to_global ( Assert (lines.empty() || sorted == true, ExcMatrixNotClosed()); const size_type n_local_dofs = local_dof_indices.size(); - internals::GlobalRowsFromLocal global_rows (n_local_dofs); + + ScratchData &my_scratch_data = scratch_data.get(); + Assert(my_scratch_data.in_use == false, + ExcMessage("Access to thread-local scratch data tried, but it is already " + "in use")); + // TODO: might want to have a scoped variable for in_use here and in the + // methods below + my_scratch_data.in_use = true; + + if (my_scratch_data.global_rows.get() == 0) + my_scratch_data.global_rows.reset(new internals::GlobalRowsFromLocal()); + internals::GlobalRowsFromLocal &global_rows = *my_scratch_data.global_rows; + global_rows.reinit(n_local_dofs); make_sorted_row_list (local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); // create arrays for the column data (indices and values) that will then be - // written into the matrix. Shortcut for deal.II sparse matrix - std::vector cols; - std::vector vals; + // written into the matrix. Shortcut for deal.II sparse matrix. We can use + // the scratch data if we have a double matrix. Otherwise, we need to create + // an array in any case since we cannot know about the actual data type in + // the ConstraintMatrix class (unless we do cast). This involves a little + // bit of logic to determine the type of the matrix value. + std::vector & cols = my_scratch_data.columns; + std::vector & vals = my_scratch_data.values; + std::vector values_non_double; SparseMatrix *sparse_matrix = dynamic_cast *>(&global_matrix); if (use_dealii_matrix == false) { cols.resize (n_actual_dofs); - vals.resize (n_actual_dofs); + if (types_are_equal::value == false) + values_non_double.resize(n_actual_dofs); + else + vals.resize (n_actual_dofs); } else Assert (sparse_matrix != 0, ExcInternalError()); @@ -2406,13 +2438,14 @@ ConstraintMatrix::distribute_local_to_global ( if (use_dealii_matrix == false) { size_type *col_ptr = &cols[0]; - number *val_ptr = &vals[0]; + // cast is uncritical here and only used to avoid compiler + // warnings. We never access a non-double array + number *val_ptr = types_are_equal::value ? 
+ reinterpret_cast(&vals[0]) : &values_non_double[0]; internals::resolve_matrix_row (global_rows, global_rows, i, 0, n_actual_dofs, local_matrix, col_ptr, val_ptr); const size_type n_values = col_ptr - &cols[0]; - Assert (n_values == (size_type)(val_ptr - &vals[0]), - ExcInternalError()); if (n_values > 0) global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); } @@ -2440,6 +2473,7 @@ ConstraintMatrix::distribute_local_to_global ( internals::set_matrix_diagonals (global_rows, local_dof_indices, local_matrix, *this, global_matrix, global_vector, use_inhomogeneities_for_rhs); + my_scratch_data.in_use = false; } @@ -2460,8 +2494,21 @@ ConstraintMatrix::distribute_local_to_global ( const size_type n_local_row_dofs = row_indices.size(); const size_type n_local_col_dofs = col_indices.size(); - internals::GlobalRowsFromLocal global_rows (n_local_row_dofs); - internals::GlobalRowsFromLocal global_cols (n_local_col_dofs); + + ScratchData &my_scratch_data = scratch_data.get(); + Assert(my_scratch_data.in_use == false, + ExcMessage("Access to thread-local scratch data tried, but it is already " + "in use")); + my_scratch_data.in_use = true; + + if (my_scratch_data.global_rows.get() == 0) + my_scratch_data.global_rows.reset(new internals::GlobalRowsFromLocal()); + if (my_scratch_data.global_columns.get() == 0) + my_scratch_data.global_columns.reset(new internals::GlobalRowsFromLocal()); + internals::GlobalRowsFromLocal &global_rows = *my_scratch_data.global_rows; + global_rows.reinit(n_local_row_dofs); + internals::GlobalRowsFromLocal &global_cols = *my_scratch_data.global_columns; + global_cols.reinit(n_local_col_dofs); make_sorted_row_list (row_indices, global_rows); make_sorted_row_list (col_indices, global_cols); @@ -2470,8 +2517,14 @@ ConstraintMatrix::distribute_local_to_global ( // create arrays for the column data (indices and values) that will then be // written into the matrix. Shortcut for deal.II sparse matrix - std::vector cols (n_actual_col_dofs); - std::vector vals (n_actual_col_dofs); + std::vector & cols = my_scratch_data.columns; + std::vector & vals = my_scratch_data.values; + std::vector values_non_double; + cols.resize(n_actual_col_dofs); + if (types_are_equal::value == true) + vals.resize(n_actual_col_dofs); + else + values_non_double.resize(n_actual_col_dofs); // now do the actual job. for (size_type i=0; i::value ? 
+ reinterpret_cast(&vals[0]) : &values_non_double[0]; internals::resolve_matrix_row (global_rows, global_cols, i, 0, n_actual_col_dofs, local_matrix, col_ptr, val_ptr); const size_type n_values = col_ptr - &cols[0]; - Assert (n_values == (size_type)(val_ptr - &vals[0]), - ExcInternalError()); if (n_values > 0) global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); } + + my_scratch_data.in_use = false; } @@ -2524,12 +2578,22 @@ distribute_local_to_global (const FullMatrix &local_matrix, } Assert (sorted == true, ExcMatrixNotClosed()); + ScratchData &my_scratch_data = scratch_data.get(); + Assert(my_scratch_data.in_use == false, + ExcMessage("Access to thread-local scratch data tried, but it is already " + "in use")); + my_scratch_data.in_use = true; + const size_type n_local_dofs = local_dof_indices.size(); - internals::GlobalRowsFromLocal global_rows (n_local_dofs); + if (my_scratch_data.global_rows.get() == 0) + my_scratch_data.global_rows.reset(new internals::GlobalRowsFromLocal()); + internals::GlobalRowsFromLocal &global_rows = *my_scratch_data.global_rows; + global_rows.reinit(n_local_dofs); + make_sorted_row_list (local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); - std::vector global_indices; + std::vector &global_indices = my_scratch_data.vector_indices; if (use_vectors == true) { global_indices.resize(n_actual_dofs); @@ -2539,15 +2603,20 @@ distribute_local_to_global (const FullMatrix &local_matrix, // additional construct that also takes care of block indices. const size_type num_blocks = global_matrix.n_block_rows(); - std::vector block_starts(num_blocks+1, n_actual_dofs); + std::vector &block_starts = my_scratch_data.block_starts; + block_starts.resize(num_blocks+1); internals::make_block_starts (global_matrix, global_rows, block_starts); - std::vector cols; - std::vector vals; + std::vector & cols = my_scratch_data.columns; + std::vector & vals = my_scratch_data.values; + std::vector values_non_double; if (use_dealii_matrix == false) { cols.resize (n_actual_dofs); - vals.resize (n_actual_dofs); + if (types_are_equal::value == true) + vals.resize(n_actual_dofs); + else + values_non_double.resize(n_actual_dofs); } // the basic difference to the non-block variant from now onwards is that we @@ -2567,13 +2636,12 @@ distribute_local_to_global (const FullMatrix &local_matrix, if (use_dealii_matrix == false) { size_type *col_ptr = &cols[0]; - number *val_ptr = &vals[0]; + number *val_ptr = types_are_equal::value ? 
+ reinterpret_cast(&vals[0]) : &values_non_double[0]; internals::resolve_matrix_row (global_rows, global_rows, i, start_block, end_block, local_matrix, col_ptr, val_ptr); const size_type n_values = col_ptr - &cols[0]; - Assert (n_values == (size_type )(val_ptr - &vals[0]), - ExcInternalError()); if (n_values > 0) global_matrix.block(block, block_col).add(row, n_values, &cols[0], &vals[0], @@ -2607,6 +2675,8 @@ distribute_local_to_global (const FullMatrix &local_matrix, internals::set_matrix_diagonals (global_rows, local_dof_indices, local_matrix, *this, global_matrix, global_vector, use_inhomogeneities_for_rhs); + + my_scratch_data.in_use = false; } @@ -2630,13 +2700,20 @@ add_entries_local_to_global (const std::vector &local_dof_indices, AssertDimension (dof_mask.n_cols(), n_local_dofs); } + ScratchData &my_scratch_data = scratch_data.get(); + Assert(my_scratch_data.in_use == false, + ExcMessage("Access to thread-local scratch data tried, but it is already " + "in use")); + my_scratch_data.in_use = true; + // if the dof mask is not active, all we have to do is to add some indices // in a matrix format. To do this, we first create an array of all the // indices that are to be added. these indices are the local dof indices // plus some indices that come from constraints. if (dof_mask_is_active == false) { - std::vector actual_dof_indices (n_local_dofs); + std::vector & actual_dof_indices = my_scratch_data.columns; + actual_dof_indices.resize(n_local_dofs); make_sorted_row_list (local_dof_indices, actual_dof_indices); const size_type n_actual_dofs = actual_dof_indices.size(); @@ -2664,6 +2741,7 @@ add_entries_local_to_global (const std::vector &local_dof_indices, sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]); } + my_scratch_data.in_use = false; return; } @@ -2671,13 +2749,17 @@ add_entries_local_to_global (const std::vector &local_dof_indices, // complicated case: we need to filter out some indices. then the function // gets similar to the function for distributing matrix entries, see there // for additional comments. - internals::GlobalRowsFromLocal global_rows (n_local_dofs); + if (my_scratch_data.global_rows.get() == 0) + my_scratch_data.global_rows.reset(new internals::GlobalRowsFromLocal()); + internals::GlobalRowsFromLocal &global_rows = *my_scratch_data.global_rows; + global_rows.reinit(n_local_dofs); make_sorted_row_list (local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); // create arrays for the column indices that will then be written into the // sparsity pattern. 
- std::vector cols (n_actual_dofs); + std::vector & cols = my_scratch_data.columns; + cols.resize(n_actual_dofs); for (size_type i=0; i &local_dof_indices, internals::set_sparsity_diagonals (global_rows, local_dof_indices, dof_mask, keep_constrained_entries, sparsity_pattern); + my_scratch_data.in_use = false; } @@ -2705,9 +2788,9 @@ void ConstraintMatrix:: add_entries_local_to_global (const std::vector &row_indices, const std::vector &col_indices, - SparsityType &sparsity_pattern, - const bool keep_constrained_entries, - const Table<2,bool> &dof_mask) const + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask) const { const size_type n_local_rows = row_indices.size(); const size_type n_local_cols = col_indices.size(); @@ -2777,6 +2860,12 @@ add_entries_local_to_global (const std::vector &local_dof_indices, const size_type n_local_dofs = local_dof_indices.size(); const size_type num_blocks = sparsity_pattern.n_block_rows(); + ScratchData &my_scratch_data = scratch_data.get(); + Assert(my_scratch_data.in_use == false, + ExcMessage("Access to thread-local scratch data tried, but it is already " + "in use")); + my_scratch_data.in_use = true; + bool dof_mask_is_active = false; if (dof_mask.n_rows() == n_local_dofs) { @@ -2786,12 +2875,14 @@ add_entries_local_to_global (const std::vector &local_dof_indices, if (dof_mask_is_active == false) { - std::vector actual_dof_indices (n_local_dofs); + std::vector & actual_dof_indices = my_scratch_data.columns; + actual_dof_indices.resize(n_local_dofs); make_sorted_row_list (local_dof_indices, actual_dof_indices); const size_type n_actual_dofs = actual_dof_indices.size(); // additional construct that also takes care of block indices. - std::vector block_starts(num_blocks+1, n_actual_dofs); + std::vector &block_starts = my_scratch_data.block_starts; + block_starts.resize(num_blocks+1); internals::make_block_starts (sparsity_pattern, actual_dof_indices, block_starts); @@ -2831,21 +2922,26 @@ add_entries_local_to_global (const std::vector &local_dof_indices, sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]); } + my_scratch_data.in_use = false; return; } // difficult case with dof_mask, similar to the distribute_local_to_global // function for block matrices - internals::GlobalRowsFromLocal global_rows (n_local_dofs); + if (my_scratch_data.global_rows.get() == 0) + my_scratch_data.global_rows.reset(new internals::GlobalRowsFromLocal()); + internals::GlobalRowsFromLocal &global_rows = *my_scratch_data.global_rows; + global_rows.reinit(n_local_dofs); make_sorted_row_list (local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); // additional construct that also takes care of block indices. - std::vector block_starts(num_blocks+1, n_actual_dofs); - internals::make_block_starts(sparsity_pattern, global_rows, - block_starts); + std::vector & block_starts = my_scratch_data.block_starts; + block_starts.resize(num_blocks+1); + internals::make_block_starts(sparsity_pattern, global_rows, block_starts); - std::vector cols (n_actual_dofs); + std::vector &cols = my_scratch_data.columns; + cols.resize(n_actual_dofs); // the basic difference to the non-block variant from now onwards is that we // go through the blocks of the matrix separately. 
@@ -2874,10 +2970,10 @@ add_entries_local_to_global (const std::vector &local_dof_indices, internals::set_sparsity_diagonals (global_rows, local_dof_indices, dof_mask, keep_constrained_entries, sparsity_pattern); + my_scratch_data.in_use = false; } DEAL_II_NAMESPACE_CLOSE #endif - diff --git a/deal.II/include/deal.II/numerics/vector_tools.templates.h b/deal.II/include/deal.II/numerics/vector_tools.templates.h index 77cc717739..7e79d0011d 100644 --- a/deal.II/include/deal.II/numerics/vector_tools.templates.h +++ b/deal.II/include/deal.II/numerics/vector_tools.templates.h @@ -1579,8 +1579,7 @@ namespace VectorTools "element.")); - // if for whatever reason we were - // passed an empty map, return + // if for whatever reason we were passed an empty map, return // immediately if (function_map.size() == 0) return; @@ -1603,78 +1602,48 @@ namespace VectorTools std::vector > dof_locations; dof_locations.reserve (DoFTools::max_dofs_per_face(dof)); - // array to store the values of - // the boundary function at the - // boundary points. have two arrays - // for scalar and vector functions - // to use the more efficient one - // respectively + // array to store the values of the boundary function at the boundary + // points. have two arrays for scalar and vector functions to use the + // more efficient one respectively std::vector dof_values_scalar; std::vector > dof_values_system; dof_values_scalar.reserve (DoFTools::max_dofs_per_face (dof)); dof_values_system.reserve (DoFTools::max_dofs_per_face (dof)); - // before we start with the loop - // over all cells create an - // hp::FEValues object that holds - // the interpolation points of all - // finite elements that may ever be - // in use + // before we start with the loop over all cells create an hp::FEValues + // object that holds the interpolation points of all finite elements + // that may ever be in use dealii::hp::FECollection finite_elements (dof.get_fe()); dealii::hp::QCollection q_collection; for (unsigned int f=0; f &fe = finite_elements[f]; - // generate a quadrature rule - // on the face from the unit - // support points. this will be - // used to obtain the - // quadrature points on the + // generate a quadrature rule on the face from the unit support + // points. this will be used to obtain the quadrature points on the // real cell's face // - // to do this, we check whether - // the FE has support points on - // the face at all: + // to do this, we check whether the FE has support points on the + // face at all: if (fe.has_face_support_points()) q_collection.push_back (Quadrature(fe.get_unit_face_support_points())); else { - // if not, then we should - // try a more clever - // way. the idea is that a - // finite element may not - // offer support points for - // all its shape functions, - // but maybe only some. if - // it offers support points - // for the components we - // are interested in in - // this function, then - // that's fine. if not, the - // function we call in the - // finite element will - // raise an exception. the - // support points for the - // other shape functions - // are left uninitialized - // (well, initialized by - // the default - // constructor), since we + // if not, then we should try a more clever way. the idea is + // that a finite element may not offer support points for all + // its shape functions, but maybe only some. if it offers + // support points for the components we are interested in in + // this function, then that's fine. 
if not, the function we call + // in the finite element will raise an exception. the support + // points for the other shape functions are left uninitialized + // (well, initialized by the default constructor), since we // don't need them anyway. // - // As a detour, we must - // make sure we only query - // face_system_to_component_index - // if the index corresponds - // to a primitive shape - // function. since we know - // that all the components - // we are interested in are - // primitive (by the above - // check), we can safely - // put such a check in - // front + // As a detour, we must make sure we only query + // face_system_to_component_index if the index corresponds to a + // primitive shape function. since we know that all the + // components we are interested in are primitive (by the above + // check), we can safely put such a check in front std::vector > unit_support_points (fe.dofs_per_face); for (unsigned int i=0; i(unit_support_points)); } } - // now that we have a q_collection - // object with all the right - // quadrature points, create an - // hp::FEFaceValues object that we - // can use to evaluate the boundary - // values at + // now that we have a q_collection object with all the right quadrature + // points, create an hp::FEFaceValues object that we can use to evaluate + // the boundary values at dealii::hp::MappingCollection mapping_collection (mapping); dealii::hp::FEFaceValues x_fe_values (mapping_collection, finite_elements, q_collection, update_quadrature_points); @@ -1705,15 +1671,11 @@ namespace VectorTools { const FiniteElement &fe = cell->get_fe(); - // we can presently deal only with - // primitive elements for boundary - // values. this does not preclude - // us using non-primitive elements - // in components that we aren't - // interested in, however. make - // sure that all shape functions - // that are non-zero for the - // components we are interested in, + // we can presently deal only with primitive elements for + // boundary values. this does not preclude us using + // non-primitive elements in components that we aren't + // interested in, however. make sure that all shape functions + // that are non-zero for the components we are interested in, // are in fact primitive for (unsigned int i=0; iget_fe().dofs_per_cell; ++i) { @@ -1732,15 +1694,9 @@ namespace VectorTools const typename DH::face_iterator face = cell->face(face_no); const types::boundary_id boundary_component = face->boundary_indicator(); - // see if this face is - // part of the - // boundaries for which - // we are supposed to - // do something, and - // also see if the - // finite element in - // use here has DoFs on - // the face at all + // see if this face is part of the boundaries for which we are + // supposed to do something, and also see if the finite element + // in use here has DoFs on the face at all if ((function_map.find(boundary_component) != function_map.end()) && (cell->get_fe().dofs_per_face > 0)) @@ -1750,9 +1706,8 @@ namespace VectorTools const dealii::FEFaceValues &fe_values = x_fe_values.get_present_fe_values(); - // get indices, physical location and - // boundary values of dofs on this - // face + // get indices, physical location and boundary values of + // dofs on this face face_dofs.resize (fe.dofs_per_face); face->get_dof_indices (face_dofs, cell->active_fe_index()); const std::vector > &dof_locations @@ -1760,12 +1715,8 @@ namespace VectorTools if (fe_is_system) { - // resize - // array. 
avoid - // construction of a - // memory allocating - // temporary if - // possible + // resize array. avoid construction of a memory + // allocating temporary if possible if (dof_values_system.size() < fe.dofs_per_face) dof_values_system.resize (fe.dofs_per_face, dealii::Vector(fe.n_components())); @@ -1775,17 +1726,10 @@ namespace VectorTools function_map.find(boundary_component)->second ->vector_value_list (dof_locations, dof_values_system); - // enter those dofs - // into the list that - // match the - // component - // signature. avoid - // the usual - // complication that - // we can't just use - // *_system_to_component_index - // for non-primitive - // FEs + // enter those dofs into the list that match the + // component signature. avoid the usual complication + // that we can't just use *_system_to_component_index + // for non-primitive FEs for (unsigned int i=0; isecond ->value_list (dof_locations, dof_values_scalar, 0); diff --git a/tests/deal.II/assemble_block_matrix_parallel.cc b/tests/deal.II/assemble_block_matrix_parallel.cc new file mode 100644 index 0000000000..8279d75034 --- /dev/null +++ b/tests/deal.II/assemble_block_matrix_parallel.cc @@ -0,0 +1,469 @@ +// --------------------------------------------------------------------- +// $Id$ +// +// Copyright (C) 2009 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + + +// same as assemble_matrix_parallel, but now for a BlockSparseMatrix + +#include "../tests.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +std::ofstream logfile("output"); + +using namespace dealii; + + +namespace Assembly +{ + namespace Scratch + { + template + struct Data + { + Data (const hp::FECollection &fe, + const hp::QCollection &quadrature) + : + hp_fe_values(fe, + quadrature, + update_values | update_gradients | + update_quadrature_points | update_JxW_values) + {} + + Data (const Data &data) + : + hp_fe_values(data.hp_fe_values.get_mapping_collection(), + data.hp_fe_values.get_fe_collection(), + data.hp_fe_values.get_quadrature_collection(), + data.hp_fe_values.get_update_flags()) + {} + + hp::FEValues hp_fe_values; + }; + } + + namespace Copy + { + struct Data + { + std::vector local_dof_indices; + FullMatrix local_matrix; + Vector local_rhs; + }; + } +} + +template +class LaplaceProblem +{ +public: + LaplaceProblem (); + ~LaplaceProblem (); + + void run (); + +private: + void setup_system (); + void test_equality (); + void assemble_reference (); + void assemble_test (); + void solve (); + void create_coarse_grid (); + void postprocess (); + + void local_assemble (const typename hp::DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::Data &scratch, + Assembly::Copy::Data &data); + void copy_local_to_global (const Assembly::Copy::Data &data); + + std::vector + get_conflict_indices (typename hp::DoFHandler::active_cell_iterator const &cell) const; + + Triangulation triangulation; + + hp::DoFHandler dof_handler; + hp::FECollection fe_collection; + hp::QCollection quadrature_collection; + hp::QCollection face_quadrature_collection; + + ConstraintMatrix constraints; + + BlockSparsityPattern sparsity_pattern; + BlockSparseMatrix reference_matrix; + BlockSparseMatrix test_matrix; + + Vector solution; + Vector reference_rhs; + Vector test_rhs; + + std::vector::active_cell_iterator> > graph; + + const unsigned int max_degree; +}; + + + +template +class BoundaryValues : public Function +{ +public: + BoundaryValues () : Function () {} + + virtual double value (const Point &p, + const unsigned int component) const; +}; + + +template +double +BoundaryValues::value (const Point &p, + const unsigned int /*component*/) const +{ + double sum = 0; + for (unsigned int d=0; d +class RightHandSide : public Function +{ +public: + RightHandSide () : Function () {} + + virtual double value (const Point &p, + const unsigned int component) const; +}; + + +template +double +RightHandSide::value (const Point &p, + const unsigned int /*component*/) const +{ + double product = 1; + for (unsigned int d=0; d +LaplaceProblem::LaplaceProblem () + : + dof_handler (triangulation), + max_degree (5) +{ + if (dim == 2) + for (unsigned int degree=2; degree<=max_degree; ++degree) + { + fe_collection.push_back (FE_Q(degree)); + quadrature_collection.push_back (QGauss(degree+1)); + face_quadrature_collection.push_back (QGauss(degree+1)); + } + else + for (unsigned int degree=1; degree(degree)); + quadrature_collection.push_back (QGauss(degree+1)); + face_quadrature_collection.push_back (QGauss(degree+1)); + } +} + + +template +LaplaceProblem::~LaplaceProblem () +{ + 
dof_handler.clear (); +} + + + +template +std::vector +LaplaceProblem:: +get_conflict_indices (typename hp::DoFHandler::active_cell_iterator const &cell) const +{ + std::vector local_dof_indices(cell->get_fe().dofs_per_cell); + cell->get_dof_indices(local_dof_indices); + + constraints.resolve_indices(local_dof_indices); + return local_dof_indices; +} + +template +void LaplaceProblem::setup_system () +{ + reference_matrix.clear(); + test_matrix.clear(); + dof_handler.distribute_dofs (fe_collection); + + solution.reinit (dof_handler.n_dofs()); + reference_rhs.reinit (dof_handler.n_dofs()); + test_rhs.reinit (dof_handler.n_dofs()); + + constraints.clear (); + + DoFTools::make_hanging_node_constraints (dof_handler, constraints); + + // add boundary conditions as inhomogeneous constraints here, do it after + // having added the hanging node constraints in order to be consistent and + // skip dofs that are already constrained (i.e., are hanging nodes on the + // boundary in 3D). In contrast to step-27, we choose a sine function. + VectorTools::interpolate_boundary_values (dof_handler, + 0, + BoundaryValues(), + constraints); + constraints.close (); + + graph = GraphColoring::make_graph_coloring(dof_handler.begin_active(),dof_handler.end(), + static_cast + (typename hp::DoFHandler::active_cell_iterator const &)> > + (std_cxx1x::bind(&LaplaceProblem::get_conflict_indices, this,std_cxx1x::_1))); + + + BlockCompressedSimpleSparsityPattern csp (2, 2); + for (unsigned int i=0; i<2; ++i) + for (unsigned int j=0; j<2; ++j) + csp.block(i,j).reinit(i==0 ? 30 : dof_handler.n_dofs() - 30, + j==0 ? 30 : dof_handler.n_dofs() - 30); + csp.collect_sizes(); + DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, false); + sparsity_pattern.copy_from (csp); + + reference_matrix.reinit (sparsity_pattern); + test_matrix.reinit (sparsity_pattern); +} + + + +template +void +LaplaceProblem::local_assemble (const typename hp::DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::Data &scratch, + Assembly::Copy::Data &data) +{ + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + + data.local_matrix.reinit (dofs_per_cell, dofs_per_cell); + data.local_matrix = 0; + + data.local_rhs.reinit (dofs_per_cell); + data.local_rhs = 0; + + scratch.hp_fe_values.reinit (cell); + + const FEValues &fe_values = scratch.hp_fe_values.get_present_fe_values (); + + const RightHandSide rhs_function; + + for (unsigned int q_point=0; + q_pointget_dof_indices (data.local_dof_indices); +} + + + +template +void +LaplaceProblem::copy_local_to_global (const Assembly::Copy::Data &data) +{ + constraints.distribute_local_to_global(data.local_matrix, data.local_rhs, + data.local_dof_indices, + test_matrix, test_rhs); +} + + + +template +void LaplaceProblem::assemble_reference () +{ + test_matrix = 0; + test_rhs = 0; + + Assembly::Copy::Data copy_data; + Assembly::Scratch::Data assembly_data(fe_collection, quadrature_collection); + + for (unsigned int color=0; color::active_cell_iterator>::const_iterator p = graph[color].begin(); + p != graph[color].end(); ++p) + { + local_assemble(*p, assembly_data, copy_data); + copy_local_to_global(copy_data); + } + + reference_matrix.add(1., test_matrix); + reference_rhs = test_rhs; +} + + + +template +void LaplaceProblem::assemble_test () +{ + test_matrix = 0; + test_rhs = 0; + + WorkStream:: + run (graph, + std_cxx1x::bind (&LaplaceProblem:: + local_assemble, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&LaplaceProblem:: + copy_local_to_global, + 
+                        this,
+                        std_cxx1x::_1),
+       Assembly::Scratch::Data<dim>(fe_collection, quadrature_collection),
+       Assembly::Copy::Data ());
+
+  test_matrix.add(-1, reference_matrix);
+
+  for (unsigned int i=0; i<2; ++i)
+    for (unsigned int j=0; j<2; ++j)
+      {
+        const double frobenius_norm = test_matrix.block(i,j).frobenius_norm();
+
+        // the data should add up exactly (cell similarity detection could
+        // introduce roundoff, but there should not be any similarity for
+        // the hyper-shell geometry)
+        AssertThrow(frobenius_norm == 0., ExcInternalError());
+      }
+  test_rhs.add(-1., reference_rhs);
+  AssertThrow(test_rhs.l2_norm() == 0., ExcInternalError());
+
+  deallog << "OK" << std::endl;
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::postprocess ()
+{
+  Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+  for (unsigned int i=0; i<estimated_error_per_cell.size(); ++i)
+    estimated_error_per_cell(i) = (float)rand()/RAND_MAX;
+
+  GridRefinement::refine_and_coarsen_fixed_number (triangulation,
+                                                   estimated_error_per_cell,
+                                                   0.3, 0.03);
+  triangulation.execute_coarsening_and_refinement ();
+
+  for (typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active();
+       cell != dof_handler.end(); ++cell)
+    cell->set_active_fe_index (rand() % fe_collection.size());
+}
+
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::run ()
+{
+  for (unsigned int cycle=0; cycle<3; ++cycle)
+    {
+      if (cycle == 0)
+        {
+          GridGenerator::hyper_shell(triangulation,
+                                     Point<dim>(),
+                                     0.5, 1., (dim==3) ? 96 : 12, false);
+        }
+
+      setup_system ();
+
+      assemble_reference ();
+      assemble_test ();
+
+      if (cycle < 2)
+        postprocess ();
+    }
+}
+
+
+
+int main ()
+{
+  deallog << std::setprecision (2);
+  logfile << std::setprecision (2);
+  deallog.attach(logfile);
+  deallog.depth_console(0);
+  deallog.threshold_double(1.e-8);
+
+  {
+    deallog.push("2d");
+    LaplaceProblem<2> laplace_problem;
+    laplace_problem.run ();
+    deallog.pop();
+  }
+
+  {
+    deallog.push("3d");
+    LaplaceProblem<3> laplace_problem;
+    laplace_problem.run ();
+    deallog.pop();
+  }
+}
+
diff --git a/tests/deal.II/assemble_block_matrix_parallel.output b/tests/deal.II/assemble_block_matrix_parallel.output
new file mode 100644
index 0000000000..43fb2379a9
--- /dev/null
+++ b/tests/deal.II/assemble_block_matrix_parallel.output
@@ -0,0 +1,7 @@
+
+DEAL:2d::OK
+DEAL:2d::OK
+DEAL:2d::OK
+DEAL:3d::OK
+DEAL:3d::OK
+DEAL:3d::OK
diff --git a/tests/deal.II/assemble_matrix_parallel.cc b/tests/deal.II/assemble_matrix_parallel.cc
new file mode 100644
index 0000000000..240d03e3ad
--- /dev/null
+++ b/tests/deal.II/assemble_matrix_parallel.cc
@@ -0,0 +1,462 @@
+// ---------------------------------------------------------------------
+// $Id$
+//
+// Copyright (C) 2009 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// this test checks that assembly of matrices in parallel works properly, in
+// particular the cache used in ConstraintMatrix that stores some scratch
+// data and is accessed by several threads at the same time.
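+//
+// the same linear system is built twice: assemble_reference() walks the
+// colored cell graph on a single thread, while assemble_test() hands the
+// same graph to WorkStream so that several threads call
+// ConstraintMatrix::distribute_local_to_global concurrently; the resulting
+// matrices and right hand sides are then compared and must agree exactly.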
+
+#include "../tests.h"
+
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/work_stream.h>
+#include <deal.II/base/graph_coloring.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/compressed_simple_sparsity_pattern.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_refinement.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/hp/fe_collection.h>
+#include <deal.II/hp/fe_values.h>
+#include <deal.II/hp/q_collection.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+#include <iostream>
+#include <complex>
+
+std::ofstream logfile("output");
+
+using namespace dealii;
+
+
+namespace Assembly
+{
+  namespace Scratch
+  {
+    template <int dim>
+    struct Data
+    {
+      Data (const hp::FECollection<dim> &fe,
+            const hp::QCollection<dim> &quadrature)
+        :
+        hp_fe_values(fe,
+                     quadrature,
+                     update_values | update_gradients |
+                     update_quadrature_points | update_JxW_values)
+      {}
+
+      Data (const Data &data)
+        :
+        hp_fe_values(data.hp_fe_values.get_mapping_collection(),
+                     data.hp_fe_values.get_fe_collection(),
+                     data.hp_fe_values.get_quadrature_collection(),
+                     data.hp_fe_values.get_update_flags())
+      {}
+
+      hp::FEValues<dim> hp_fe_values;
+    };
+  }
+
+  namespace Copy
+  {
+    struct Data
+    {
+      std::vector<types::global_dof_index> local_dof_indices;
+      FullMatrix<double> local_matrix;
+      Vector<double> local_rhs;
+    };
+  }
+}
+
+template <int dim>
+class LaplaceProblem
+{
+public:
+  LaplaceProblem ();
+  ~LaplaceProblem ();
+
+  void run ();
+
+private:
+  void setup_system ();
+  void test_equality ();
+  void assemble_reference ();
+  void assemble_test ();
+  void solve ();
+  void create_coarse_grid ();
+  void postprocess ();
+
+  void local_assemble (const typename hp::DoFHandler<dim>::active_cell_iterator &cell,
+                       Assembly::Scratch::Data<dim> &scratch,
+                       Assembly::Copy::Data &data);
+  void copy_local_to_global (const Assembly::Copy::Data &data);
+
+  std::vector<types::global_dof_index>
+  get_conflict_indices (typename hp::DoFHandler<dim>::active_cell_iterator const &cell) const;
+
+  Triangulation<dim> triangulation;
+
+  hp::DoFHandler<dim> dof_handler;
+  hp::FECollection<dim> fe_collection;
+  hp::QCollection<dim> quadrature_collection;
+  hp::QCollection<dim-1> face_quadrature_collection;
+
+  ConstraintMatrix constraints;
+
+  SparsityPattern sparsity_pattern;
+  SparseMatrix<double> reference_matrix;
+  SparseMatrix<double> test_matrix;
+
+  Vector<double> solution;
+  Vector<double> reference_rhs;
+  Vector<double> test_rhs;
+
+  std::vector<std::vector<typename hp::DoFHandler<dim>::active_cell_iterator> > graph;
+
+  const unsigned int max_degree;
+};
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+  BoundaryValues () : Function<dim> () {}
+
+  virtual double value (const Point<dim> &p,
+                        const unsigned int component) const;
+};
+
+
+template <int dim>
+double
+BoundaryValues<dim>::value (const Point<dim> &p,
+                            const unsigned int /*component*/) const
+{
+  double sum = 0;
+  for (unsigned int d=0; d<dim; ++d)
+    sum += std::sin(numbers::PI*p[d]);
+  return sum;
+}
+
+
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+  RightHandSide () : Function<dim> () {}
+
+  virtual double value (const Point<dim> &p,
+                        const unsigned int component) const;
+};
+
+
+template <int dim>
+double
+RightHandSide<dim>::value (const Point<dim> &p,
+                           const unsigned int /*component*/) const
+{
+  double product = 1;
+  for (unsigned int d=0; d<dim; ++d)
+    product *= (p[d]+1);
+  return product;
+}
+
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem ()
+  :
+  dof_handler (triangulation),
+  max_degree (5)
+{
+  if (dim == 2)
+    for (unsigned int degree=2; degree<=max_degree; ++degree)
+      {
+        fe_collection.push_back (FE_Q<dim>(degree));
+        quadrature_collection.push_back (QGauss<dim>(degree+1));
+        face_quadrature_collection.push_back (QGauss<dim-1>(degree+1));
+      }
+  else
+    for (unsigned int degree=1; degree<max_degree-1; ++degree)
+      {
+        fe_collection.push_back (FE_Q<dim>(degree));
+        quadrature_collection.push_back (QGauss<dim>(degree+1));
+        face_quadrature_collection.push_back (QGauss<dim-1>(degree+1));
+      }
+}
+
+
+template <int dim>
+LaplaceProblem<dim>::~LaplaceProblem ()
+{
+  dof_handler.clear ();
+}
+
+
+
+template <int dim>
+std::vector<types::global_dof_index>
+LaplaceProblem<dim>::
+get_conflict_indices (typename hp::DoFHandler<dim>::active_cell_iterator const &cell) const
+{
+  std::vector<types::global_dof_index> local_dof_indices(cell->get_fe().dofs_per_cell);
+  cell->get_dof_indices(local_dof_indices);
+
+  constraints.resolve_indices(local_dof_indices);
+  return local_dof_indices;
+}
+
+template <int dim>
+void LaplaceProblem<dim>::setup_system ()
+{
+  reference_matrix.clear();
+  test_matrix.clear();
+  dof_handler.distribute_dofs (fe_collection);
+
+  solution.reinit (dof_handler.n_dofs());
+  reference_rhs.reinit (dof_handler.n_dofs());
+  test_rhs.reinit (dof_handler.n_dofs());
+
+  constraints.clear ();
+
+  DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+
+  // add boundary conditions as inhomogeneous constraints here; do it after
+  // having added the hanging node constraints in order to be consistent and
+  // skip dofs that are already constrained (i.e., are hanging nodes on the
+  // boundary in 3D). In contrast to step-27, we choose a sine function.
+  VectorTools::interpolate_boundary_values (dof_handler,
+                                            0,
+                                            BoundaryValues<dim>(),
+                                            constraints);
+  constraints.close ();
+
+  graph = GraphColoring::make_graph_coloring(dof_handler.begin_active(), dof_handler.end(),
+                                             static_cast<std_cxx1x::function<std::vector<types::global_dof_index>
+                                             (typename hp::DoFHandler<dim>::active_cell_iterator const &)> >
+                                             (std_cxx1x::bind(&LaplaceProblem<dim>::get_conflict_indices, this, std_cxx1x::_1)));
+
+
+  CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
+                                       dof_handler.n_dofs());
+  DoFTools::make_sparsity_pattern (dof_handler, csp,
+                                   constraints, false);
+  sparsity_pattern.copy_from (csp);
+
+  reference_matrix.reinit (sparsity_pattern);
+  test_matrix.reinit (sparsity_pattern);
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::local_assemble (const typename hp::DoFHandler<dim>::active_cell_iterator &cell,
+                                     Assembly::Scratch::Data<dim> &scratch,
+                                     Assembly::Copy::Data &data)
+{
+  const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+
+  data.local_matrix.reinit (dofs_per_cell, dofs_per_cell);
+  data.local_matrix = 0;
+
+  data.local_rhs.reinit (dofs_per_cell);
+  data.local_rhs = 0;
+
+  scratch.hp_fe_values.reinit (cell);
+
+  const FEValues<dim> &fe_values = scratch.hp_fe_values.get_present_fe_values ();
+
+  const RightHandSide<dim> rhs_function;
+
+  // assemble the local Laplace matrix and right hand side
+  for (unsigned int q_point=0;
+       q_point<fe_values.n_quadrature_points;
+       ++q_point)
+    {
+      const double rhs_value = rhs_function.value(fe_values.quadrature_point(q_point), 0);
+      for (unsigned int i=0; i<dofs_per_cell; ++i)
+        {
+          for (unsigned int j=0; j<dofs_per_cell; ++j)
+            data.local_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+                                       fe_values.shape_grad(j,q_point) *
+                                       fe_values.JxW(q_point));
+
+          data.local_rhs(i) += (fe_values.shape_value(i,q_point) *
+                                rhs_value *
+                                fe_values.JxW(q_point));
+        }
+    }
+
+  data.local_dof_indices.resize (dofs_per_cell);
+  cell->get_dof_indices (data.local_dof_indices);
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::copy_local_to_global (const Assembly::Copy::Data &data)
+{
+  constraints.distribute_local_to_global(data.local_matrix, data.local_rhs,
+                                         data.local_dof_indices,
+                                         test_matrix, test_rhs);
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::assemble_reference ()
+{
+  test_matrix = 0;
+  test_rhs = 0;
+
+  Assembly::Copy::Data copy_data;
+  Assembly::Scratch::Data<dim> assembly_data(fe_collection, quadrature_collection);
+
+  for (unsigned int color=0; color<graph.size(); ++color)
+    for (typename std::vector<typename hp::DoFHandler<dim>::active_cell_iterator>::const_iterator p = graph[color].begin();
+         p != graph[color].end(); ++p)
+      {
+        local_assemble(*p, assembly_data, copy_data);
+        copy_local_to_global(copy_data);
+      }
+
+  reference_matrix.add(1., test_matrix);
+  reference_rhs = test_rhs;
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::assemble_test ()
+{
+  test_matrix = 0;
+  test_rhs = 0;
+
+  WorkStream::
+  run (graph,
+       std_cxx1x::bind (&LaplaceProblem<dim>::
+                        local_assemble,
+                        this,
+                        std_cxx1x::_1,
+                        std_cxx1x::_2,
+                        std_cxx1x::_3),
+       std_cxx1x::bind (&LaplaceProblem<dim>::
+                        copy_local_to_global,
+                        this,
+                        std_cxx1x::_1),
+       Assembly::Scratch::Data<dim>(fe_collection, quadrature_collection),
+       Assembly::Copy::Data ());
+
+  test_matrix.add(-1, reference_matrix);
+  const double frobenius_norm = test_matrix.frobenius_norm();
+
+  // the data should add up exactly (cell similarity detection could
+  // introduce roundoff, but there should not be any similarity for
+  // the hyper-shell geometry)
+  AssertThrow(frobenius_norm == 0., ExcInternalError());
+  test_rhs.add(-1., reference_rhs);
+  AssertThrow(test_rhs.l2_norm() == 0., ExcInternalError());
+
+  deallog << "OK" << std::endl;
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::postprocess ()
+{
+  Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+  for (unsigned int i=0; i<estimated_error_per_cell.size(); ++i)
+    estimated_error_per_cell(i) = (float)rand()/RAND_MAX;
+
+  GridRefinement::refine_and_coarsen_fixed_number (triangulation,
+                                                   estimated_error_per_cell,
+                                                   0.3, 0.03);
+  triangulation.execute_coarsening_and_refinement ();
+
+  for (typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active();
+       cell != dof_handler.end(); ++cell)
+    cell->set_active_fe_index (rand() % fe_collection.size());
+}
+
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::run ()
+{
+  for (unsigned int cycle=0; cycle<3; ++cycle)
+    {
+      if (cycle == 0)
+        {
+          GridGenerator::hyper_shell(triangulation,
+                                     Point<dim>(),
+                                     0.5, 1., (dim==3) ? 96 : 12, false);
+        }
+
+      setup_system ();
+
+      assemble_reference ();
+      assemble_test ();
+
+      if (cycle < 2)
+        postprocess ();
+    }
+}
+
+
+
+int main ()
+{
+  deallog << std::setprecision (2);
+  logfile << std::setprecision (2);
+  deallog.attach(logfile);
+  deallog.depth_console(0);
+  deallog.threshold_double(1.e-8);
+
+  {
+    deallog.push("2d");
+    LaplaceProblem<2> laplace_problem;
+    laplace_problem.run ();
+    deallog.pop();
+  }
+
+  {
+    deallog.push("3d");
+    LaplaceProblem<3> laplace_problem;
+    laplace_problem.run ();
+    deallog.pop();
+  }
+}
+
diff --git a/tests/deal.II/assemble_matrix_parallel.output b/tests/deal.II/assemble_matrix_parallel.output
new file mode 100644
index 0000000000..43fb2379a9
--- /dev/null
+++ b/tests/deal.II/assemble_matrix_parallel.output
@@ -0,0 +1,7 @@
+
+DEAL:2d::OK
+DEAL:2d::OK
+DEAL:2d::OK
+DEAL:3d::OK
+DEAL:3d::OK
+DEAL:3d::OK
-- 
2.39.5