From: kronbichler Date: Sat, 18 Jan 2014 16:22:14 +0000 (+0000) Subject: Use thread-safe path for Trilinos matrix assembly also when initializing with a Compr... X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c85f459025582a9218ab90861dc37f1c6eb2454b;p=dealii-svn.git Use thread-safe path for Trilinos matrix assembly also when initializing with a CompressedSimpleSparsityPattern git-svn-id: https://svn.dealii.org/trunk@32246 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/include/deal.II/lac/compressed_simple_sparsity_pattern.h b/deal.II/include/deal.II/lac/compressed_simple_sparsity_pattern.h index 6257f4733a..fa745dcd53 100644 --- a/deal.II/include/deal.II/lac/compressed_simple_sparsity_pattern.h +++ b/deal.II/include/deal.II/lac/compressed_simple_sparsity_pattern.h @@ -98,139 +98,96 @@ public: typedef types::global_dof_index size_type; /** - * An iterator that can be used to - * iterate over the elements of a single - * row. The result of dereferencing such - * an iterator is a column index. + * An iterator that can be used to iterate over the elements of a single + * row. The result of dereferencing such an iterator is a column index. */ typedef std::vector::const_iterator row_iterator; /** - * Initialize the matrix empty, - * that is with no memory - * allocated. This is useful if - * you want such objects as - * member variables in other - * classes. You can make the - * structure usable by calling - * the reinit() function. + * Initialize the matrix empty, that is with no memory allocated. This is + * useful if you want such objects as member variables in other classes. You + * can make the structure usable by calling the reinit() function. */ CompressedSimpleSparsityPattern (); /** - * Copy constructor. This constructor is - * only allowed to be called if the - * matrix structure to be copied is - * empty. This is so in order to prevent - * involuntary copies of objects for - * temporaries, which can use large - * amounts of computing time. However, - * copy constructors are needed if you - * want to use the STL data types on - * classes like this, e.g. to write such - * statements like v.push_back - * (CompressedSparsityPattern());, - * with @p v a vector of @p - * CompressedSparsityPattern objects. + * Copy constructor. This constructor is only allowed to be called if the + * matrix structure to be copied is empty. This is so in order to prevent + * involuntary copies of objects for temporaries, which can use large + * amounts of computing time. However, copy constructors are needed if you + * want to use the STL data types on classes like this, e.g. to write such + * statements like v.push_back (CompressedSparsityPattern());, with + * @p v a vector of @p CompressedSparsityPattern objects. */ CompressedSimpleSparsityPattern (const CompressedSimpleSparsityPattern &); /** - * Initialize a rectangular - * matrix with @p m rows and - * @p n columns. The @p rowset - * restricts the storage to - * elements in rows of this set. - * Adding elements outside of - * this set has no effect. The - * default argument keeps all - * entries. + * Initialize a rectangular matrix with @p m rows and @p n columns. The @p + * rowset restricts the storage to elements in rows of this set. Adding + * elements outside of this set has no effect. The default argument keeps + * all entries. */ CompressedSimpleSparsityPattern (const size_type m, const size_type n, const IndexSet &rowset = IndexSet()); /** - * Create a square SparsityPattern using - * the index set. 
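As a rough illustration of this constructor (a sketch, not part of the patch; the sizes and names are made up), a square pattern restricted to a set of locally stored rows could be set up like this:

     // a 1000 x 1000 pattern that only stores rows 0..99 on this processor
     IndexSet stored_rows (1000);
     stored_rows.add_range (0, 100);
     CompressedSimpleSparsityPattern csp (stored_rows);

     csp.add (5, 42);    // kept: row 5 is part of the stored row set
     csp.add (500, 3);   // has no effect: row 500 is not stored here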
+ * Create a square SparsityPattern using the index set. */ CompressedSimpleSparsityPattern (const IndexSet &indexset); /** - * Initialize a square matrix of - * dimension @p n. + * Initialize a square matrix of dimension @p n. */ CompressedSimpleSparsityPattern (const size_type n); /** - * Copy operator. For this the - * same holds as for the copy - * constructor: it is declared, - * defined and fine to be called, - * but the latter only for empty + * Copy operator. For this the same holds as for the copy constructor: it is + * declared, defined and fine to be called, but the latter only for empty * objects. */ CompressedSimpleSparsityPattern &operator = (const CompressedSimpleSparsityPattern &); /** - * Reallocate memory and set up - * data structures for a new - * matrix with @p m rows and - * @p n columns, with at most - * max_entries_per_row() nonzero - * entries per row. The @p rowset - * restricts the storage to - * elements in rows of this set. - * Adding elements outside of - * this set has no effect. The - * default argument keeps all - * entries. + * Reallocate memory and set up data structures for a new matrix with @p m + * rows and @p n columns, with at most max_entries_per_row() nonzero entries + * per row. The @p rowset restricts the storage to elements in rows of this + * set. Adding elements outside of this set has no effect. The default + * argument keeps all entries. */ void reinit (const size_type m, const size_type n, const IndexSet &rowset = IndexSet()); /** - * Since this object is kept - * compressed at all times anway, - * this function does nothing, - * but is declared to make the - * interface of this class as - * much alike as that of the - * SparsityPattern class. + * Since this object is kept compressed at all times anway, this function + * does nothing, but is declared to make the interface of this class as much + * alike as that of the SparsityPattern class. */ void compress (); /** - * Return whether the object is - * empty. It is empty if no - * memory is allocated, which is - * the same as that both - * dimensions are zero. + * Return whether the object is empty. It is empty if no memory is + * allocated, which is the same as that both dimensions are zero. */ bool empty () const; /** - * Return the maximum number of - * entries per row. Note that - * this number may change as - * entries are added. + * Return the maximum number of entries per row. Note that this number may + * change as entries are added. */ size_type max_entries_per_row () const; /** - * Add a nonzero entry to the - * matrix. If the entry already - * exists, nothing bad happens. + * Add a nonzero entry to the matrix. If the entry already exists, nothing + * bad happens. */ void add (const size_type i, const size_type j); /** - * Add several nonzero entries to the - * specified row of the matrix. If the - * entries already exist, nothing bad - * happens. + * Add several nonzero entries to the specified row of the matrix. If the + * entries already exist, nothing bad happens. */ template void add_entries (const size_type row, @@ -239,98 +196,70 @@ public: const bool indices_are_unique_and_sorted = false); /** - * Check if a value at a certain - * position may be non-zero. + * Check if a value at a certain position may be non-zero. */ bool exists (const size_type i, const size_type j) const; /** - * Make the sparsity pattern - * symmetric by adding the - * sparsity pattern of the + * Make the sparsity pattern symmetric by adding the sparsity pattern of the * transpose object. 
* - * This function throws an - * exception if the sparsity - * pattern does not represent a - * square matrix. + * This function throws an exception if the sparsity pattern does not + * represent a square matrix. */ void symmetrize (); /** - * Print the sparsity of the - * matrix. The output consists of - * one line per row of the format - * [i,j1,j2,j3,...]. i - * is the row number and - * jn are the allocated - * columns in this row. + * Print the sparsity of the matrix. The output consists of one line per row + * of the format [i,j1,j2,j3,...]. i is the row number and + * jn are the allocated columns in this row. */ void print (std::ostream &out) const; /** - * Print the sparsity of the matrix in a - * format that @p gnuplot understands and - * which can be used to plot the sparsity - * pattern in a graphical way. The format - * consists of pairs i j of - * nonzero elements, each representing - * one entry of this matrix, one per line - * of the output file. Indices are - * counted from zero on, as usual. Since - * sparsity patterns are printed in the - * same way as matrices are displayed, we - * print the negative of the column - * index, which means that the - * (0,0) element is in the top - * left rather than in the bottom left - * corner. + * Print the sparsity of the matrix in a format that @p gnuplot understands + * and which can be used to plot the sparsity pattern in a graphical + * way. The format consists of pairs i j of nonzero elements, each + * representing one entry of this matrix, one per line of the output + * file. Indices are counted from zero on, as usual. Since sparsity patterns + * are printed in the same way as matrices are displayed, we print the + * negative of the column index, which means that the (0,0) element + * is in the top left rather than in the bottom left corner. * - * Print the sparsity pattern in - * gnuplot by setting the data style - * to dots or points and use the - * @p plot command. + * Print the sparsity pattern in gnuplot by setting the data style to dots + * or points and use the @p plot command. */ void print_gnuplot (std::ostream &out) const; /** - * Return number of rows of this - * matrix, which equals the dimension - * of the image space. + * Return number of rows of this matrix, which equals the dimension of the + * image space. */ size_type n_rows () const; /** - * Return number of columns of this - * matrix, which equals the dimension - * of the range space. + * Return number of columns of this matrix, which equals the dimension of + * the range space. */ size_type n_cols () const; /** - * Number of entries in a - * specific row. This function - * can only be called if the - * given row is a member of the - * index set of rows that we want - * to store. + * Number of entries in a specific row. This function can only be called if + * the given row is a member of the index set of rows that we want to store. */ size_type row_length (const size_type row) const; /** - * Access to column number field. - * Return the column number of - * the @p indexth entry in @p row. + * Access to column number field. Return the column number of the @p + * indexth entry in @p row. */ size_type column_number (const size_type row, const size_type index) const; /** - * Return an iterator that can loop over - * all entries in the given - * row. Dereferencing the iterator yields - * a column index. + * Return an iterator that can loop over all entries in the given + * row. Dereferencing the iterator yields a column index. 
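For illustration (a sketch, not part of the patch; csp and the row number row are assumed to exist), the row iterators declared below are used like this:

     // print all column indices stored in one row of a
     // CompressedSimpleSparsityPattern csp
     for (CompressedSimpleSparsityPattern::row_iterator it = csp.row_begin (row);
          it != csp.row_end (row); ++it)
       std::cout << *it << ' ';    // dereferencing yields a column index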
*/ row_iterator row_begin (const size_type row) const; @@ -339,65 +268,52 @@ public: */ row_iterator row_end (const size_type row) const; /** - * Compute the bandwidth of the matrix - * represented by this structure. The - * bandwidth is the maximum of - * $|i-j|$ for which the index pair - * $(i,j)$ represents a nonzero entry - * of the matrix. + * Compute the bandwidth of the matrix represented by this structure. The + * bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ + * represents a nonzero entry of the matrix. */ size_type bandwidth () const; /** - * Return the number of nonzero elements - * allocated through this sparsity pattern. + * Return the number of nonzero elements allocated through this sparsity + * pattern. */ size_type n_nonzero_elements () const; /** - * Return the IndexSet that sets which - * rows are active on the current - * processor. It corresponds to the - * IndexSet given to this class in the + * Return the IndexSet that sets which rows are active on the current + * processor. It corresponds to the IndexSet given to this class in the * constructor or in the reinit function. */ const IndexSet &row_index_set () const; /** - * return whether this object stores only - * those entries that have been added - * explicitly, or if the sparsity pattern - * contains elements that have been added - * through other means (implicitly) while - * building it. For the current class, - * the result is always true. + * return whether this object stores only those entries that have been added + * explicitly, or if the sparsity pattern contains elements that have been + * added through other means (implicitly) while building it. For the current + * class, the result is always true. * - * This function mainly serves the - * purpose of describing the current - * class in cases where several kinds of - * sparsity patterns can be passed as + * This function mainly serves the purpose of describing the current class + * in cases where several kinds of sparsity patterns can be passed as * template arguments. */ static bool stores_only_added_elements (); /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. + * Determine an estimate for the memory consumption (in bytes) of this + * object. */ size_type memory_consumption () const; private: /** - * Number of rows that this sparsity - * structure shall represent. + * Number of rows that this sparsity structure shall represent. */ size_type rows; /** - * Number of columns that this sparsity - * structure shall represent. + * Number of columns that this sparsity structure shall represent. */ size_type cols; @@ -409,21 +325,17 @@ private: /** - * Store some data for each row - * describing which entries of this row - * are nonzero. Data is stored sorted in - * the @p entries std::vector. - * The vector per row is dynamically - * growing upon insertion doubling its - * memory each time. + * Store some data for each row describing which entries of this row are + * nonzero. Data is stored sorted in the @p entries std::vector. The vector + * per row is dynamically growing upon insertion doubling its memory each + * time. */ struct Line { public: /** - * Storage for the column indices of - * this row. This array is always - * kept sorted. + * Storage for the column indices of this row. This array is always kept + * sorted. */ std::vector entries; @@ -433,14 +345,12 @@ private: Line (); /** - * Add the given column number to - * this line. + * Add the given column number to this line. 
*/ void add (const size_type col_num); /** - * Add the columns specified by the - * iterator range to this line. + * Add the columns specified by the iterator range to this line. */ template void add_entries (ForwardIterator begin, @@ -455,9 +365,7 @@ private: /** - * Actual data: store for each - * row the set of nonzero - * entries. + * Actual data: store for each row the set of nonzero entries. */ std::vector lines; }; @@ -470,28 +378,24 @@ inline void CompressedSimpleSparsityPattern::Line::add (const size_type j) { - // first check the last element (or if line - // is still empty) + // first check the last element (or if line is still empty) if ( (entries.size()==0) || ( entries.back() < j) ) { entries.push_back(j); return; } - // do a binary search to find the place - // where to insert: + // do a binary search to find the place where to insert: std::vector::iterator it = Utilities::lower_bound(entries.begin(), entries.end(), j); - // If this entry is a duplicate, exit - // immediately + // If this entry is a duplicate, exit immediately if (*it == j) return; - // Insert at the right place in the - // vector. Vector grows automatically to + // Insert at the right place in the vector. Vector grows automatically to // fit elements. Always doubles its size. entries.insert(it, j); } diff --git a/deal.II/include/deal.II/lac/sparsity_tools.h b/deal.II/include/deal.II/lac/sparsity_tools.h index 89b4a2b21c..55baf88362 100644 --- a/deal.II/include/deal.II/lac/sparsity_tools.h +++ b/deal.II/include/deal.II/lac/sparsity_tools.h @@ -52,152 +52,89 @@ namespace SparsityTools typedef types::global_dof_index size_type; /** - * Use the METIS partitioner to generate - * a partitioning of the degrees of - * freedom represented by this sparsity - * pattern. In effect, we view this - * sparsity pattern as a graph of - * connections between various degrees of - * freedom, where each nonzero entry in - * the sparsity pattern corresponds to an - * edge between two nodes in the - * connection graph. The goal is then to - * decompose this graph into groups of - * nodes so that a minimal number of - * edges are cut by the boundaries - * between node groups. This partitioning - * is done by METIS. Note that METIS can - * only partition symmetric sparsity - * patterns, and that of course the - * sparsity pattern has to be square. We - * do not check for symmetry of the - * sparsity pattern, since this is an - * expensive operation, but rather leave - * this as the responsibility of caller - * of this function. + * Use the METIS partitioner to generate a partitioning of the degrees of + * freedom represented by this sparsity pattern. In effect, we view this + * sparsity pattern as a graph of connections between various degrees of + * freedom, where each nonzero entry in the sparsity pattern corresponds to + * an edge between two nodes in the connection graph. The goal is then to + * decompose this graph into groups of nodes so that a minimal number of + * edges are cut by the boundaries between node groups. This partitioning is + * done by METIS. Note that METIS can only partition symmetric sparsity + * patterns, and that of course the sparsity pattern has to be square. We do + * not check for symmetry of the sparsity pattern, since this is an + * expensive operation, but rather leave this as the responsibility of + * caller of this function. * - * After calling this function, the - * output array will have values between - * zero and @p n_partitions-1 for each - * node (i.e. 
row or column of the + * After calling this function, the output array will have values between + * zero and @p n_partitions-1 for each node (i.e. row or column of the * matrix). * - * This function will generate an error - * if METIS is not installed unless - * @p n_partitions is one. I.e., you can - * write a program so that it runs in the - * single-processor single-partition case - * without METIS installed, and only - * requires METIS when multiple - * partitions are required. + * This function will generate an error if METIS is not installed unless @p + * n_partitions is one. I.e., you can write a program so that it runs in the + * single-processor single-partition case without METIS installed, and only + * requires METIS when multiple partitions are required. * - * Note that the sparsity pattern itself - * is not changed by calling this - * function. However, you will likely use - * the information generated by calling - * this function to renumber degrees of - * freedom, after which you will of - * course have to regenerate the sparsity - * pattern. + * Note that the sparsity pattern itself is not changed by calling this + * function. However, you will likely use the information generated by + * calling this function to renumber degrees of freedom, after which you + * will of course have to regenerate the sparsity pattern. * - * This function will rarely be called - * separately, since in finite element - * methods you will want to partition the - * mesh, not the matrix. This can be done - * by calling - * @p GridTools::partition_triangulation. + * This function will rarely be called separately, since in finite element + * methods you will want to partition the mesh, not the matrix. This can be + * done by calling @p GridTools::partition_triangulation. */ void partition (const SparsityPattern &sparsity_pattern, const unsigned int n_partitions, std::vector &partition_indices); /** - * For a given sparsity pattern, compute a - * re-enumeration of row/column indices - * based on the algorithm by Cuthill-McKee. + * For a given sparsity pattern, compute a re-enumeration of row/column + * indices based on the algorithm by Cuthill-McKee. * - * This algorithm is a graph renumbering - * algorithm in which we attempt to find a - * new numbering of all nodes of a graph - * based on their connectivity to other - * nodes (i.e. the edges that connect - * nodes). This connectivity is here - * represented by the sparsity pattern. In - * many cases within the library, the nodes - * represent degrees of freedom and edges - * are nonzero entries in a matrix, - * i.e. pairs of degrees of freedom that - * couple through the action of a bilinear - * form. + * This algorithm is a graph renumbering algorithm in which we attempt to + * find a new numbering of all nodes of a graph based on their connectivity + * to other nodes (i.e. the edges that connect nodes). This connectivity is + * here represented by the sparsity pattern. In many cases within the + * library, the nodes represent degrees of freedom and edges are nonzero + * entries in a matrix, i.e. pairs of degrees of freedom that couple through + * the action of a bilinear form. * - * The algorithms starts at a node, - * searches the other nodes for - * those which are coupled with the one we - * started with and numbers these in a - * certain way. It then finds the second - * level of nodes, namely those that couple - * with those of the previous level (which - * were those that coupled with the initial - * node) and numbers these. And so on. 
For - * the details of the algorithm, especially - * the numbering within each level, we - * refer the reader to the book of Schwarz - * (H. R. Schwarz: Methode der finiten + * The algorithms starts at a node, searches the other nodes for those which + * are coupled with the one we started with and numbers these in a certain + * way. It then finds the second level of nodes, namely those that couple + * with those of the previous level (which were those that coupled with the + * initial node) and numbers these. And so on. For the details of the + * algorithm, especially the numbering within each level, we refer the + * reader to the book of Schwarz (H. R. Schwarz: Methode der finiten * Elemente). * - * These algorithms have one major - * drawback: they require a good starting - * node, i.e. node that will have number - * zero in the output array. A starting - * node forming the initial level of nodes - * can thus be given by the user, e.g. by - * exploiting knowledge of the actual - * topology of the domain. It is also - * possible to give several starting - * indices, which may be used to simulate a - * simple upstream numbering (by giving the - * inflow nodes as starting values) or to - * make preconditioning faster (by letting - * the Dirichlet boundary indices be - * starting points). + * These algorithms have one major drawback: they require a good starting + * node, i.e. node that will have number zero in the output array. A + * starting node forming the initial level of nodes can thus be given by the + * user, e.g. by exploiting knowledge of the actual topology of the + * domain. It is also possible to give several starting indices, which may + * be used to simulate a simple upstream numbering (by giving the inflow + * nodes as starting values) or to make preconditioning faster (by letting + * the Dirichlet boundary indices be starting points). * - * If no starting index is given, one is - * chosen automatically, namely one with - * the smallest coordination number (the - * coordination number is the number of - * other nodes this node couples - * with). This node is usually located on - * the boundary of the domain. There is, - * however, large ambiguity in this when - * using the hierarchical meshes used in - * this library, since in most cases the - * computational domain is not approximated - * by tilting and deforming elements and by - * plugging together variable numbers of - * elements at vertices, but rather by - * hierarchical refinement. There is - * therefore a large number of nodes with - * equal coordination numbers. The - * renumbering algorithms will therefore - * not give optimal results. + * If no starting index is given, one is chosen automatically, namely one + * with the smallest coordination number (the coordination number is the + * number of other nodes this node couples with). This node is usually + * located on the boundary of the domain. There is, however, large ambiguity + * in this when using the hierarchical meshes used in this library, since in + * most cases the computational domain is not approximated by tilting and + * deforming elements and by plugging together variable numbers of elements + * at vertices, but rather by hierarchical refinement. There is therefore a + * large number of nodes with equal coordination numbers. The renumbering + * algorithms will therefore not give optimal results. 
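A compressed usage sketch (illustrative only, not part of the patch; sp is an existing, compressed SparsityPattern, the default choice of starting indices is used, and the size_type follows the typedefs shown above):

     std::vector<SparsityPattern::size_type> new_indices (sp.n_rows());
     SparsityTools::reorder_Cuthill_McKee (sp, new_indices);
     // new_indices[i] is the new number given to the node (row) that
     // previously had number i; the automatically chosen starting node
     // receives number zero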
* - * If the graph has two or more - * unconnected components and if no - * starting indices are given, the - * algorithm will number each - * component - * consecutively. However, this - * requires the determination of a - * starting index for each - * component; as a consequence, the - * algorithm will produce an - * exception if starting indices - * are given, taking the latter as - * an indication that the caller of - * the function would like to - * override the part of the - * algorithm that chooses starting - * indices. + * If the graph has two or more unconnected components and if no starting + * indices are given, the algorithm will number each component + * consecutively. However, this requires the determination of a starting + * index for each component; as a consequence, the algorithm will produce an + * exception if starting indices are given, taking the latter as an + * indication that the caller of the function would like to override the + * part of the algorithm that chooses starting indices. */ void reorder_Cuthill_McKee (const SparsityPattern &sparsity, @@ -207,45 +144,26 @@ namespace SparsityTools #ifdef DEAL_II_WITH_MPI /** - * Communciate rows in a compressed - * sparsity pattern over MPI. + * Communciate rows in a compressed sparsity pattern over MPI. * - * @param csp is the sparsity - * pattern that has been built - * locally and for which we need to - * exchange entries with other - * processors to make sure that - * each processor knows all the - * elements of the rows of a matrix - * it stores and that may - * eventually be written to. This - * sparsity pattern will be changed - * as a result of this function: - * All entries in rows that belong - * to a different processor are - * sent to them and added there. + * @param csp is the sparsity pattern that has been built locally and for + * which we need to exchange entries with other processors to make sure that + * each processor knows all the elements of the rows of a matrix it stores + * and that may eventually be written to. This sparsity pattern will be + * changed as a result of this function: All entries in rows that belong to + * a different processor are sent to them and added there. * * @param rows_per_cpu determines ownership of rows. * - * @param mpi_comm is the MPI - * communicator that is shared - * between the processors that all - * participate in this operation. + * @param mpi_comm is the MPI communicator that is shared between the + * processors that all participate in this operation. * - * @param myrange indicates the - * range of elements stored locally - * and should be the one used in - * the constructor of the - * CompressedSimpleSparsityPattern. - * This should be the locally relevant set. - * Only - * rows contained in myrange are - * checked in csp for transfer. - * This function needs to be used - * with - * PETScWrappers::MPI::SparseMatrix - * for it to work correctly in a - * parallel computation. + * @param myrange indicates the range of elements stored locally and should + * be the one used in the constructor of the + * CompressedSimpleSparsityPattern. This should be the locally relevant + * set. Only rows contained in myrange are checked in csp for transfer. + * This function needs to be used with PETScWrappers::MPI::SparseMatrix for + * it to work correctly in a parallel computation. 
*/ template void distribute_sparsity_pattern(CSP_t &csp, diff --git a/deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h b/deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h index 99d19b118a..1464bff090 100644 --- a/deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h @@ -194,7 +194,8 @@ namespace TrilinosWrappers */ template void reinit (const std::vector &input_maps, - const BlockSparsityType &block_sparsity_pattern); + const BlockSparsityType &block_sparsity_pattern, + const bool exchange_data = false); /** * Resize the matrix, by using an @@ -207,7 +208,8 @@ namespace TrilinosWrappers template void reinit (const std::vector &input_maps, const BlockSparsityType &block_sparsity_pattern, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); /** * Resize the matrix and initialize it diff --git a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h index 2a21d5daa9..13571aa507 100644 --- a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h @@ -46,6 +46,8 @@ # include "Epetra_SerialComm.h" # endif +class Epetra_Export; + DEAL_II_NAMESPACE_OPEN // forward declarations @@ -54,6 +56,7 @@ template class BlockMatrixBase; template class SparseMatrix; class SparsityPattern; + namespace TrilinosWrappers { // forward declarations @@ -456,21 +459,26 @@ namespace TrilinosWrappers * matrix row at the same time can lead to data races and must be explicitly * avoided by the user. However, it is possible to access different * rows of the matrix from several threads simultaneously under the - * following two conditions: + * following three conditions: *
    *
      * <li> The matrix uses only one MPI process.
+     *
+     * <li> The matrix has been initialized with the reinit() method
+     * with a CompressedSimpleSparsityPattern (that includes the set of locally
+     * relevant rows, i.e., the rows that an assembly routine will possibly
+     * write into).
      *
      * <li> The matrix has been initialized from a
      * TrilinosWrappers::SparsityPattern object that in turn has been
      * initialized with the reinit function specifying three index sets, one
      * for the rows, one for the columns and for the larger set of @p
-     * writeable_rows, and the operation is an addition. Note that all other
-     * reinit methods and constructors of TrilinosWrappers::SparsityPattern
-     * will result in a matrix that needs to allocate off-processor entries on
-     * demand, which breaks thread-safety. Of course, using the respective
-     * reinit method for the block Trilinos sparsity pattern and block matrix
-     * also results in thread-safety.
+     * writeable_rows, and the operation is an addition.
      *
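A condensed sketch of the last option (hypothetical names; it assumes the TrilinosWrappers::SparsityPattern::reinit overload taking row, column and writable-row index sets described above, plus an existing dof_handler and constraints object):

     IndexSet owned = dof_handler.locally_owned_dofs ();
     IndexSet writable;                          // locally relevant rows
     DoFTools::extract_locally_relevant_dofs (dof_handler, writable);

     TrilinosWrappers::SparsityPattern sp;
     sp.reinit (owned, owned, writable, MPI_COMM_WORLD);
     DoFTools::make_sparsity_pattern (dof_handler, sp, constraints, false);
     sp.compress ();

     TrilinosWrappers::SparseMatrix matrix;
     matrix.reinit (sp);
     // several threads may now add() into different rows of 'matrix'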
* + * Note that all other reinit methods and constructors of + * TrilinosWrappers::SparsityPattern will result in a matrix that needs to + * allocate off-processor entries on demand, which breaks thread-safety. Of + * course, using the respective reinit method for the block Trilinos + * sparsity pattern and block matrix also results in thread-safety. + * * @ingroup TrilinosWrappers * @ingroup Matrix1 * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 @@ -720,6 +728,11 @@ namespace TrilinosWrappers * set, each processor just sets the elements in the sparsity pattern that * belong to its rows. * + * If the sparsity pattern given to this function is of type + * CompressedSimpleSparsity pattern, then a matrix will be created that + * allows several threads to write into different rows of the matrix at + * the same also with MPI, as opposed to most other reinit() methods. + * * This is a collective operation that needs to be called on all * processors in order to avoid a dead lock. */ @@ -1844,6 +1857,11 @@ namespace TrilinosWrappers */ std_cxx1x::shared_ptr nonlocal_matrix; + /** + * An export object used to communicate the nonlocal matrix. + */ + std_cxx1x::shared_ptr nonlocal_matrix_exporter; + /** * Trilinos doesn't allow to mix additions to matrix entries and * overwriting them (to make synchronisation of %parallel computations diff --git a/deal.II/source/lac/trilinos_block_sparse_matrix.cc b/deal.II/source/lac/trilinos_block_sparse_matrix.cc index b338de4186..79cc8aa3c5 100644 --- a/deal.II/source/lac/trilinos_block_sparse_matrix.cc +++ b/deal.II/source/lac/trilinos_block_sparse_matrix.cc @@ -84,7 +84,8 @@ namespace TrilinosWrappers void BlockSparseMatrix:: reinit (const std::vector ¶llel_partitioning, - const BlockSparsityType &block_sparsity_pattern) + const BlockSparsityType &block_sparsity_pattern, + const bool exchange_data) { Assert (parallel_partitioning.size() == block_sparsity_pattern.n_block_rows(), ExcDimensionMismatch (parallel_partitioning.size(), @@ -118,7 +119,8 @@ namespace TrilinosWrappers { this->sub_objects[r][c]->reinit (parallel_partitioning[r], parallel_partitioning[c], - block_sparsity_pattern.block(r,c)); + block_sparsity_pattern.block(r,c), + exchange_data); } } @@ -129,14 +131,15 @@ namespace TrilinosWrappers BlockSparseMatrix:: reinit (const std::vector ¶llel_partitioning, const BlockSparsityType &block_sparsity_pattern, - const MPI_Comm &communicator) + const MPI_Comm &communicator, + const bool exchange_data) { std::vector epetra_maps; for (size_type i=0; i &, - const dealii::BlockSparsityPattern &); + const dealii::BlockSparsityPattern &, + const bool); template void BlockSparseMatrix::reinit (const std::vector &, - const dealii::BlockCompressedSparsityPattern &); + const dealii::BlockCompressedSparsityPattern &, + const bool); template void BlockSparseMatrix::reinit (const std::vector &, - const dealii::BlockCompressedSetSparsityPattern &); + const dealii::BlockCompressedSetSparsityPattern &, + const bool); template void BlockSparseMatrix::reinit (const std::vector &, - const dealii::BlockCompressedSimpleSparsityPattern &); + const dealii::BlockCompressedSimpleSparsityPattern &, + const bool); template void BlockSparseMatrix::reinit (const std::vector &, const dealii::BlockCompressedSimpleSparsityPattern &, - const MPI_Comm &); + const MPI_Comm &, + const bool); } diff --git a/deal.II/source/lac/trilinos_sparse_matrix.cc b/deal.II/source/lac/trilinos_sparse_matrix.cc index c486406f7f..644baa2f07 100644 --- 
a/deal.II/source/lac/trilinos_sparse_matrix.cc +++ b/deal.II/source/lac/trilinos_sparse_matrix.cc @@ -25,6 +25,7 @@ # include # include # include +# include # include # include @@ -390,6 +391,7 @@ namespace TrilinosWrappers SparseMatrix::copy_from (const SparseMatrix &m) { nonlocal_matrix.reset(); + nonlocal_matrix_exporter.reset(); // check whether we need to update the // partitioner or can just copy the data: @@ -480,6 +482,7 @@ namespace TrilinosWrappers // release memory before reallocation matrix.reset(); nonlocal_matrix.reset(); + nonlocal_matrix_exporter.reset(); // if we want to exchange data, build a usual Trilinos sparsity pattern // and let that handle the exchange. otherwise, manually create a @@ -575,10 +578,169 @@ namespace TrilinosWrappers + // specialization for CompressedSimpleSparsityPattern which can provide us + // with more information about the non-locally owned rows + template <> + void + SparseMatrix::reinit (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, + const CompressedSimpleSparsityPattern &sparsity_pattern, + const bool exchange_data) + { + matrix.reset(); + nonlocal_matrix.reset(); + nonlocal_matrix_exporter.reset(); + + AssertDimension (sparsity_pattern.n_rows(), + static_cast(n_global_elements(input_row_map))); + + // exchange data if requested with deal.II's own function +#ifdef DEAL_II_WITH_MPI + const Epetra_MpiComm* communicator = + dynamic_cast(&input_row_map.Comm()); + if (exchange_data && communicator != 0) + { + std::vector + owned_per_proc(communicator->NumProc(), -1); + size_type my_elements = input_row_map.NumMyElements(); + MPI_Allgather(&my_elements, 1, + Utilities::MPI::internal::mpi_type_id(&my_elements), + &owned_per_proc[0], 1, + Utilities::MPI::internal::mpi_type_id(&my_elements), + communicator->Comm()); + + SparsityTools::distribute_sparsity_pattern + (const_cast(sparsity_pattern), + owned_per_proc, communicator->Comm(), sparsity_pattern.row_index_set()); + } +#endif + + if (input_row_map.Comm().MyPID() == 0) + { + AssertDimension (sparsity_pattern.n_rows(), + static_cast(n_global_elements(input_row_map))); + AssertDimension (sparsity_pattern.n_cols(), + static_cast(n_global_elements(input_col_map))); + } + + column_space_map.reset (new Epetra_Map (input_col_map)); + + IndexSet relevant_rows (sparsity_pattern.row_index_set()); + // serial case + if (relevant_rows.size() == 0) + { + relevant_rows.set_size(n_global_elements(input_row_map)); + relevant_rows.add_range(0, n_global_elements(input_row_map)); + } + relevant_rows.compress(); + Assert(relevant_rows.n_elements() >= input_row_map.NumMyElements(), + ExcMessage("Locally relevant rows of sparsity pattern must contain " + "all locally owned rows")); + + const unsigned int n_rows = relevant_rows.n_elements(); + std::vector ghost_rows; + std::vector n_entries_per_row(input_row_map.NumMyElements()); + std::vector n_entries_per_ghost_row; + for (unsigned int i=0, own=0; i 0) + { + ghost_rows.push_back(global_row); + n_entries_per_ghost_row.push_back(sparsity_pattern.row_length(global_row)); + } + } + + // make sure all processors create an off-processor matrix with at least + // one entry + if (input_row_map.Comm().NumProc() > 1 && ghost_rows.empty() == true) + { + ghost_rows.push_back(0); + n_entries_per_ghost_row.push_back(1); + } + + Epetra_Map off_processor_map(-1, ghost_rows.size(), &ghost_rows[0], + 0, input_row_map.Comm()); + + std_cxx1x::shared_ptr graph, nonlocal_graph; + if (input_row_map.Comm().NumProc() > 1) + { + graph.reset (new Epetra_CrsGraph (Copy, 
input_row_map, + &n_entries_per_row[0], true)); + nonlocal_graph.reset (new Epetra_CrsGraph (Copy, off_processor_map, + &n_entries_per_ghost_row[0], + true)); + } + else + graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map, + &n_entries_per_row[0], true)); + + // now insert the indices, select between the right matrix + std::vector row_indices; + + for (unsigned int i=0; iInsertGlobalIndices (global_row, row_length, &row_indices[0]); + else + { + Assert(nonlocal_graph.get() != 0, ExcInternalError()); + nonlocal_graph->InsertGlobalIndices (global_row, row_length, + &row_indices[0]); + } + } + + graph->FillComplete(input_col_map, input_row_map); + graph->OptimizeStorage(); + + AssertDimension (sparsity_pattern.n_cols(),static_cast( + n_global_cols(*graph))); + + matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false)); + + // and now the same operations for the nonlocal graph and matrix + if (nonlocal_graph.get() != 0) + { + if (nonlocal_graph->IndicesAreGlobal() == false && + nonlocal_graph->RowMap().NumMyElements() > 0) + { + // insert dummy element + TrilinosWrappers::types::int_type row = nonlocal_graph->RowMap().MyGID(0); + nonlocal_graph->InsertGlobalIndices(row, 1, &row); + } + Assert(nonlocal_graph->IndicesAreGlobal() == true, + ExcInternalError()); + nonlocal_graph->FillComplete(input_col_map, input_row_map); + nonlocal_graph->OptimizeStorage(); + + nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *nonlocal_graph)); + } + last_action = Zero; + + // In the end, the matrix needs to be compressed in order to be really + // ready. + compress(); + } + + + void SparseMatrix::reinit (const SparsityPattern &sparsity_pattern) { matrix.reset (); + nonlocal_matrix_exporter.reset(); // reinit with a (parallel) Trilinos sparsity pattern. 
column_space_map.reset (new Epetra_Map @@ -602,6 +764,7 @@ namespace TrilinosWrappers { column_space_map.reset (new Epetra_Map (sparse_matrix.domain_partitioner())); matrix.reset (); + nonlocal_matrix_exporter.reset(); matrix.reset (new Epetra_FECrsMatrix (Copy, sparse_matrix.trilinos_sparsity_pattern(), false)); if (sparse_matrix.nonlocal_matrix != 0) @@ -767,6 +930,7 @@ namespace TrilinosWrappers const Epetra_CrsGraph *graph = &input_matrix.Graph(); nonlocal_matrix.reset(); + nonlocal_matrix_exporter.reset(); matrix.reset (); matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false)); @@ -818,8 +982,10 @@ namespace TrilinosWrappers // do only export in case of an add() operation, otherwise the owning // processor must have set the correct entry nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap()); - Epetra_Export exporter(nonlocal_matrix->RowMap(), matrix->RowMap()); - ierr = matrix->Export(*nonlocal_matrix, exporter, mode); + if (nonlocal_matrix_exporter.get() == 0) + nonlocal_matrix_exporter.reset + (new Epetra_Export(nonlocal_matrix->RowMap(), matrix->RowMap())); + ierr = matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode); AssertThrow(ierr == 0, ExcTrilinosError(ierr)); ierr = matrix->FillComplete(*column_space_map, matrix->RowMap()); nonlocal_matrix->PutScalar(0); @@ -850,6 +1016,7 @@ namespace TrilinosWrappers Utilities::Trilinos::comm_self())); matrix.reset (new Epetra_FECrsMatrix(View, *column_space_map, 0)); nonlocal_matrix.reset(); + nonlocal_matrix_exporter.reset(); matrix->FillComplete(); @@ -1619,11 +1786,6 @@ namespace TrilinosWrappers const Epetra_Map &, const CompressedSimpleSparsityPattern &, const bool); - template void - SparseMatrix::reinit (const Epetra_Map &, - const Epetra_Map &, - const CompressedSetSparsityPattern &, - const bool); } diff --git a/tests/trilinos/assemble_matrix_parallel_05.cc b/tests/trilinos/assemble_matrix_parallel_05.cc new file mode 100644 index 0000000000..f39a906e1a --- /dev/null +++ b/tests/trilinos/assemble_matrix_parallel_05.cc @@ -0,0 +1,454 @@ +// --------------------------------------------------------------------- +// $Id$ +// +// Copyright (C) 2009 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + + +// tests thread safety of parallel Trilinos matrices. Same test as +// parallel_matrix_assemble_02 but initializing the matrix from +// CompressedSimpleSparsityPattern instead of a Trilinos sparsity pattern. 
+ +#include "../tests.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +std::ofstream logfile("output"); + +using namespace dealii; + + +namespace Assembly +{ + namespace Scratch + { + template + struct Data + { + Data (const FiniteElement &fe, + const Quadrature &quadrature) + : + fe_values(fe, + quadrature, + update_values | update_gradients | + update_quadrature_points | update_JxW_values) + {} + + Data (const Data &data) + : + fe_values(data.fe_values.get_mapping(), + data.fe_values.get_fe(), + data.fe_values.get_quadrature(), + data.fe_values.get_update_flags()) + {} + + FEValues fe_values; + }; + } + + namespace Copy + { + struct Data + { + Data(const bool assemble_reference) + : + assemble_reference(assemble_reference) + {} + std::vector local_dof_indices; + FullMatrix local_matrix; + Vector local_rhs; + const bool assemble_reference; + }; + } +} + +template +class LaplaceProblem +{ +public: + LaplaceProblem (); + ~LaplaceProblem (); + + void run (); + +private: + void setup_system (); + void test_equality (); + void assemble_reference (); + void assemble_test (); + void solve (); + void create_coarse_grid (); + void postprocess (); + + void local_assemble (const FilteredIterator::active_cell_iterator> &cell, + Assembly::Scratch::Data &scratch, + Assembly::Copy::Data &data); + void copy_local_to_global (const Assembly::Copy::Data &data); + + std::vector + get_conflict_indices (FilteredIterator::active_cell_iterator> const &cell) const; + + parallel::distributed::Triangulation triangulation; + + DoFHandler dof_handler; + FE_Q fe; + QGauss quadrature; + + ConstraintMatrix constraints; + + TrilinosWrappers::SparseMatrix reference_matrix; + TrilinosWrappers::SparseMatrix test_matrix; + + TrilinosWrappers::MPI::Vector reference_rhs; + TrilinosWrappers::MPI::Vector test_rhs; + + std::vector::active_cell_iterator> > > graph; +}; + + + +template +class BoundaryValues : public Function +{ +public: + BoundaryValues () : Function () {} + + virtual double value (const Point &p, + const unsigned int component) const; +}; + + +template +double +BoundaryValues::value (const Point &p, + const unsigned int /*component*/) const +{ + double sum = 0; + for (unsigned int d=0; d +class RightHandSide : public Function +{ +public: + RightHandSide () : Function () {} + + virtual double value (const Point &p, + const unsigned int component) const; +}; + + +template +double +RightHandSide::value (const Point &p, + const unsigned int /*component*/) const +{ + double product = 1; + for (unsigned int d=0; d +LaplaceProblem::LaplaceProblem () + : + triangulation (MPI_COMM_WORLD), + dof_handler (triangulation), + fe (1), + quadrature(fe.degree+1) +{ +} + + +template +LaplaceProblem::~LaplaceProblem () +{ + dof_handler.clear (); +} + + + +template +std::vector +LaplaceProblem:: +get_conflict_indices (FilteredIterator::active_cell_iterator> const &cell) const +{ + std::vector local_dof_indices(cell->get_fe().dofs_per_cell); + cell->get_dof_indices(local_dof_indices); + + constraints.resolve_indices(local_dof_indices); + return local_dof_indices; +} + +template +void LaplaceProblem::setup_system () +{ + reference_matrix.clear(); + test_matrix.clear(); + dof_handler.distribute_dofs (fe); + + constraints.clear (); + + DoFTools::make_hanging_node_constraints 
(dof_handler, constraints); + + // add boundary conditions as inhomogeneous constraints here, do it after + // having added the hanging node constraints in order to be consistent and + // skip dofs that are already constrained (i.e., are hanging nodes on the + // boundary in 3D). In contrast to step-27, we choose a sine function. + VectorTools::interpolate_boundary_values (dof_handler, + 0, + BoundaryValues(), + constraints); + constraints.close (); + + typedef FilteredIterator::active_cell_iterator> CellFilter; + CellFilter begin(IteratorFilters::LocallyOwnedCell(),dof_handler.begin_active()); + CellFilter end(IteratorFilters::LocallyOwnedCell(),dof_handler.end()); + graph = GraphColoring::make_graph_coloring(begin,end, + static_cast + (FilteredIterator::active_cell_iterator> const &)> > + (std_cxx1x::bind(&LaplaceProblem::get_conflict_indices, this,std_cxx1x::_1))); + + IndexSet locally_owned = dof_handler.locally_owned_dofs(); + { + TrilinosWrappers::SparsityPattern csp; + csp.reinit(locally_owned, locally_owned, MPI_COMM_WORLD); + DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, false); + csp.compress(); + reference_matrix.reinit (csp); + reference_rhs.reinit (locally_owned, MPI_COMM_WORLD); + } + { + IndexSet relevant_set; + DoFTools::extract_locally_relevant_dofs (dof_handler, relevant_set); + CompressedSimpleSparsityPattern csp(dof_handler.n_dofs(), dof_handler.n_dofs(), + relevant_set); + DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, false); + test_matrix.reinit (locally_owned, csp, MPI_COMM_WORLD, true); + test_rhs.reinit (locally_owned, relevant_set, MPI_COMM_WORLD, true); + } +} + + + +template +void +LaplaceProblem::local_assemble (const FilteredIterator::active_cell_iterator> &cell, + Assembly::Scratch::Data &scratch, + Assembly::Copy::Data &data) +{ + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + + data.local_matrix.reinit (dofs_per_cell, dofs_per_cell); + data.local_matrix = 0; + + data.local_rhs.reinit (dofs_per_cell); + data.local_rhs = 0; + + scratch.fe_values.reinit (cell); + + const FEValues &fe_values = scratch.fe_values; + + const RightHandSide rhs_function; + + for (unsigned int q_point=0; + q_pointget_dof_indices (data.local_dof_indices); +} + + + +template +void +LaplaceProblem::copy_local_to_global (const Assembly::Copy::Data &data) +{ + if (data.assemble_reference) + constraints.distribute_local_to_global(data.local_matrix, data.local_rhs, + data.local_dof_indices, + reference_matrix, reference_rhs); + else + constraints.distribute_local_to_global(data.local_matrix, data.local_rhs, + data.local_dof_indices, + test_matrix, test_rhs); +} + + + +template +void LaplaceProblem::assemble_reference () +{ + reference_matrix = 0; + reference_rhs = 0; + + Assembly::Copy::Data copy_data(true); + Assembly::Scratch::Data assembly_data(fe, quadrature); + + for (unsigned int color=0; color::active_cell_iterator> >::const_iterator p = graph[color].begin(); + p != graph[color].end(); ++p) + { + local_assemble(*p, assembly_data, copy_data); + copy_local_to_global(copy_data); + } + reference_matrix.compress(VectorOperation::add); + reference_rhs.compress(VectorOperation::add); +} + + + +template +void LaplaceProblem::assemble_test () +{ + test_matrix = 0; + test_rhs = 0; + + WorkStream:: + run (graph, + std_cxx1x::bind (&LaplaceProblem:: + local_assemble, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&LaplaceProblem:: + copy_local_to_global, + this, + std_cxx1x::_1), + 
Assembly::Scratch::Data(fe, quadrature), + Assembly::Copy::Data (false), + 2*multithread_info.n_threads(), + 1); + test_matrix.compress(VectorOperation::add); + test_rhs.compress(VectorOperation::add); + + test_matrix.add(-1, reference_matrix); + + // there should not even be roundoff difference between matrices + deallog.threshold_double(1.e-30); + deallog << "error in matrix: " << test_matrix.frobenius_norm() << std::endl; + test_rhs.add(-1., reference_rhs); + deallog << "error in vector: " << test_rhs.l2_norm() << std::endl; +} + + + +template +void LaplaceProblem::postprocess () +{ + Vector estimated_error_per_cell (triangulation.n_active_cells()); + for (unsigned int i=0; i +void LaplaceProblem::run () +{ + for (unsigned int cycle=0; cycle<3; ++cycle) + { + if (cycle == 0) + { + GridGenerator::hyper_cube(triangulation, 0, 1); + triangulation.refine_global(6); + } + + setup_system (); + + assemble_reference (); + assemble_test (); + + if (cycle < 2) + postprocess (); + } +} + + + +int main (int argc, char **argv) +{ + deallog << std::setprecision (2); + logfile << std::setprecision (2); + deallog.attach(logfile); + deallog.depth_console(0); + + Utilities::MPI::MPI_InitFinalize init(argc, argv, numbers::invalid_unsigned_int); + + { + deallog.push("2d"); + LaplaceProblem<2> laplace_problem; + laplace_problem.run (); + deallog.pop(); + } +} + diff --git a/tests/trilinos/assemble_matrix_parallel_05.mpirun=1.output b/tests/trilinos/assemble_matrix_parallel_05.mpirun=1.output new file mode 100644 index 0000000000..6e5eabc7ba --- /dev/null +++ b/tests/trilinos/assemble_matrix_parallel_05.mpirun=1.output @@ -0,0 +1,7 @@ + +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 diff --git a/tests/trilinos/assemble_matrix_parallel_05.mpirun=4.output b/tests/trilinos/assemble_matrix_parallel_05.mpirun=4.output new file mode 100644 index 0000000000..6e5eabc7ba --- /dev/null +++ b/tests/trilinos/assemble_matrix_parallel_05.mpirun=4.output @@ -0,0 +1,7 @@ + +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 diff --git a/tests/trilinos/assemble_matrix_parallel_06.cc b/tests/trilinos/assemble_matrix_parallel_06.cc new file mode 100644 index 0000000000..592f21a821 --- /dev/null +++ b/tests/trilinos/assemble_matrix_parallel_06.cc @@ -0,0 +1,479 @@ +// --------------------------------------------------------------------- +// $Id$ +// +// Copyright (C) 2009 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + + +// tests thread safety of parallel Trilinos block matrices. Same test as +// parallel_matrix_assemble_04 but initializing the matrix from +// BlockCompressedSimpleSparsityPattern instead of a Trilinos sparsity pattern. 
+ +#include "../tests.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +std::ofstream logfile("output"); + +using namespace dealii; + + +namespace Assembly +{ + namespace Scratch + { + template + struct Data + { + Data (const FiniteElement &fe, + const Quadrature &quadrature) + : + fe_values(fe, + quadrature, + update_values | update_gradients | + update_quadrature_points | update_JxW_values) + {} + + Data (const Data &data) + : + fe_values(data.fe_values.get_mapping(), + data.fe_values.get_fe(), + data.fe_values.get_quadrature(), + data.fe_values.get_update_flags()) + {} + + FEValues fe_values; + }; + } + + namespace Copy + { + struct Data + { + Data(const bool assemble_reference) + : + assemble_reference(assemble_reference) + {} + std::vector local_dof_indices; + FullMatrix local_matrix; + Vector local_rhs; + const bool assemble_reference; + }; + } +} + +template +class LaplaceProblem +{ +public: + LaplaceProblem (); + ~LaplaceProblem (); + + void run (); + +private: + void setup_system (); + void test_equality (); + void assemble_reference (); + void assemble_test (); + void solve (); + void create_coarse_grid (); + void postprocess (); + + void local_assemble (const FilteredIterator::active_cell_iterator> &cell, + Assembly::Scratch::Data &scratch, + Assembly::Copy::Data &data); + void copy_local_to_global (const Assembly::Copy::Data &data); + + std::vector + get_conflict_indices (FilteredIterator::active_cell_iterator> const &cell) const; + + parallel::distributed::Triangulation triangulation; + + DoFHandler dof_handler; + FESystem fe; + QGauss quadrature; + + ConstraintMatrix constraints; + + TrilinosWrappers::BlockSparseMatrix reference_matrix; + TrilinosWrappers::BlockSparseMatrix test_matrix; + + TrilinosWrappers::MPI::BlockVector reference_rhs; + TrilinosWrappers::MPI::BlockVector test_rhs; + + std::vector::active_cell_iterator> > > graph; +}; + + + +template +class BoundaryValues : public Function +{ +public: + BoundaryValues () : Function (2) {} + + virtual double value (const Point &p, + const unsigned int component) const; +}; + + +template +double +BoundaryValues::value (const Point &p, + const unsigned int /*component*/) const +{ + double sum = 0; + for (unsigned int d=0; d +class RightHandSide : public Function +{ +public: + RightHandSide () : Function () {} + + virtual double value (const Point &p, + const unsigned int component) const; +}; + + +template +double +RightHandSide::value (const Point &p, + const unsigned int /*component*/) const +{ + double product = 1; + for (unsigned int d=0; d +LaplaceProblem::LaplaceProblem () + : + triangulation (MPI_COMM_WORLD), + dof_handler (triangulation), + fe (FE_Q(1),1, FE_Q(2),1), + quadrature(3) +{ +} + + +template +LaplaceProblem::~LaplaceProblem () +{ + dof_handler.clear (); +} + + + +template +std::vector +LaplaceProblem:: +get_conflict_indices (FilteredIterator::active_cell_iterator> const &cell) const +{ + std::vector local_dof_indices(cell->get_fe().dofs_per_cell); + cell->get_dof_indices(local_dof_indices); + + constraints.resolve_indices(local_dof_indices); + return local_dof_indices; +} + +template +void LaplaceProblem::setup_system () +{ + reference_matrix.clear(); + test_matrix.clear(); + dof_handler.distribute_dofs (fe); + std::vector blocks(2,0); + 
blocks[1] = 1; + DoFRenumbering::component_wise(dof_handler, blocks); + + constraints.clear (); + + DoFTools::make_hanging_node_constraints (dof_handler, constraints); + + // add boundary conditions as inhomogeneous constraints here, do it after + // having added the hanging node constraints in order to be consistent and + // skip dofs that are already constrained (i.e., are hanging nodes on the + // boundary in 3D). In contrast to step-27, we choose a sine function. + VectorTools::interpolate_boundary_values (dof_handler, + 0, + BoundaryValues(), + constraints); + constraints.close (); + + typedef FilteredIterator::active_cell_iterator> CellFilter; + CellFilter begin(IteratorFilters::LocallyOwnedCell(),dof_handler.begin_active()); + CellFilter end(IteratorFilters::LocallyOwnedCell(),dof_handler.end()); + graph = GraphColoring::make_graph_coloring(begin,end, + static_cast + (FilteredIterator::active_cell_iterator> const &)> > + (std_cxx1x::bind(&LaplaceProblem::get_conflict_indices, this,std_cxx1x::_1))); + + TrilinosWrappers::BlockSparsityPattern csp(2,2); + std::vector locally_owned(2), relevant_set(2); + IndexSet locally_owned_total = dof_handler.locally_owned_dofs(), relevant_total; + DoFTools::extract_locally_relevant_dofs (dof_handler, relevant_total); + + std::vector dofs_per_block (2); + DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, blocks); + locally_owned[0] = locally_owned_total.get_view(0, dofs_per_block[0]); + locally_owned[1] = locally_owned_total.get_view(dofs_per_block[0], + dof_handler.n_dofs()); + relevant_set[0] = relevant_total.get_view(0, dofs_per_block[0]); + relevant_set[1] = relevant_total.get_view(dofs_per_block[0], + dof_handler.n_dofs()); + + { + csp.reinit(locally_owned, locally_owned, MPI_COMM_WORLD); + DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, false); + csp.compress(); + reference_matrix.reinit (csp); + reference_rhs.reinit (locally_owned, MPI_COMM_WORLD); + } + { + BlockCompressedSimpleSparsityPattern csp(relevant_set); + DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, false); + test_matrix.reinit (locally_owned, csp, MPI_COMM_WORLD, true); + test_rhs.reinit (locally_owned, relevant_set, MPI_COMM_WORLD, true); + } +} + + + +template +void +LaplaceProblem::local_assemble (const FilteredIterator::active_cell_iterator> &cell, + Assembly::Scratch::Data &scratch, + Assembly::Copy::Data &data) +{ + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + + data.local_matrix.reinit (dofs_per_cell, dofs_per_cell); + data.local_matrix = 0; + + data.local_rhs.reinit (dofs_per_cell); + data.local_rhs = 0; + + scratch.fe_values.reinit (cell); + + const FEValues &fe_values = scratch.fe_values; + + const RightHandSide rhs_function; + + // this does not make a lot of sense physically but it serves the purpose of + // the test well + for (unsigned int q_point=0; + q_pointget_dof_indices (data.local_dof_indices); +} + + + +template +void +LaplaceProblem::copy_local_to_global (const Assembly::Copy::Data &data) +{ + if (data.assemble_reference) + constraints.distribute_local_to_global(data.local_matrix, data.local_rhs, + data.local_dof_indices, + reference_matrix, reference_rhs); + else + constraints.distribute_local_to_global(data.local_matrix, data.local_rhs, + data.local_dof_indices, + test_matrix, test_rhs); +} + + + +template +void LaplaceProblem::assemble_reference () +{ + reference_matrix = 0; + reference_rhs = 0; + + Assembly::Copy::Data copy_data(true); + Assembly::Scratch::Data assembly_data(fe, 
quadrature); + + for (unsigned int color=0; color::active_cell_iterator> >::const_iterator p = graph[color].begin(); + p != graph[color].end(); ++p) + { + local_assemble(*p, assembly_data, copy_data); + copy_local_to_global(copy_data); + } + reference_matrix.compress(VectorOperation::add); + reference_rhs.compress(VectorOperation::add); +} + + + +template +void LaplaceProblem::assemble_test () +{ + test_matrix = 0; + test_rhs = 0; + + WorkStream:: + run (graph, + std_cxx1x::bind (&LaplaceProblem:: + local_assemble, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&LaplaceProblem:: + copy_local_to_global, + this, + std_cxx1x::_1), + Assembly::Scratch::Data(fe, quadrature), + Assembly::Copy::Data (false), + 2*multithread_info.n_threads(), + 1); + test_matrix.compress(VectorOperation::add); + test_rhs.compress(VectorOperation::add); + + test_matrix.add(-1, reference_matrix); + + // there should not even be roundoff difference between matrices + deallog.threshold_double(1.e-30); + double frobenius_norm = 0; + for (unsigned int i=0; i<2; ++i) + for (unsigned int j=0; j<2; ++j) + frobenius_norm += numbers::NumberTraits::abs_square(test_matrix.block(i,j).frobenius_norm()); + deallog << "error in matrix: " << std::sqrt(frobenius_norm) << std::endl; + test_rhs.add(-1., reference_rhs); + deallog << "error in vector: " << test_rhs.l2_norm() << std::endl; +} + + + +template +void LaplaceProblem::postprocess () +{ + Vector estimated_error_per_cell (triangulation.n_active_cells()); + for (unsigned int i=0; i +void LaplaceProblem::run () +{ + for (unsigned int cycle=0; cycle<3; ++cycle) + { + if (cycle == 0) + { + GridGenerator::hyper_shell(triangulation, + Point(), + 0.5, 1., (dim==3) ? 96 : 12, false); +#ifdef DEBUG + triangulation.refine_global(3); +#else + triangulation.refine_global(5); +#endif + } + + setup_system (); + + assemble_reference (); + assemble_test (); + + if (cycle < 2) + postprocess (); + } +} + + + +int main (int argc, char **argv) +{ + deallog << std::setprecision (2); + logfile << std::setprecision (2); + deallog.attach(logfile); + deallog.depth_console(0); + + Utilities::MPI::MPI_InitFinalize init(argc, argv, numbers::invalid_unsigned_int); + + { + deallog.push("2d"); + LaplaceProblem<2> laplace_problem; + laplace_problem.run (); + deallog.pop(); + } +} + diff --git a/tests/trilinos/assemble_matrix_parallel_06.mpirun=4.output b/tests/trilinos/assemble_matrix_parallel_06.mpirun=4.output new file mode 100644 index 0000000000..6e5eabc7ba --- /dev/null +++ b/tests/trilinos/assemble_matrix_parallel_06.mpirun=4.output @@ -0,0 +1,7 @@ + +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0 +DEAL:2d::error in matrix: 0 +DEAL:2d::error in vector: 0
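
For reference, a condensed sketch of the initialization path these tests exercise (simplified from assemble_matrix_parallel_05.cc above; dof_handler and constraints are assumed to be set up already, and the names are placeholders):

     #include <deal.II/dofs/dof_tools.h>
     #include <deal.II/lac/compressed_simple_sparsity_pattern.h>
     #include <deal.II/lac/trilinos_sparse_matrix.h>

     // build the pattern on the locally relevant rows, so that several
     // threads (and rows owned by other processors) can be written to
     // during assembly
     IndexSet locally_owned = dof_handler.locally_owned_dofs ();
     IndexSet relevant_set;
     DoFTools::extract_locally_relevant_dofs (dof_handler, relevant_set);

     CompressedSimpleSparsityPattern csp (dof_handler.n_dofs (),
                                          dof_handler.n_dofs (),
                                          relevant_set);
     DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false);

     TrilinosWrappers::SparseMatrix matrix;
     // exchange_data == true: entries of the locally built sparsity pattern
     // are exchanged between processors before the matrix is created
     matrix.reinit (locally_owned, csp, MPI_COMM_WORLD, true);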