From: Sebastian Kinnewig Date: Mon, 23 Oct 2023 08:41:41 +0000 (+0200) Subject: Add a Tpetra-based version of Trilinos SparseMatrix and SparsityPattern. X-Git-Tag: relicensing~186^2~1 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f0eaee6f72d896b483d93e4f9812d6c1ad831775;p=dealii.git Add a Tpetra-based version of Trilinos SparseMatrix and SparsityPattern. --- diff --git a/cmake/config/template-arguments.in b/cmake/config/template-arguments.in index 60997c9769..151402614b 100644 --- a/cmake/config/template-arguments.in +++ b/cmake/config/template-arguments.in @@ -242,6 +242,8 @@ TRIANGULATIONS := { Triangulation; parallel::distributed::Triangulation; parallel::fullydistributed::Triangulation; } +TRILINOS_SCALARS := { @DEAL_II_EXPAND_TPETRA_TYPES@; } + // all supported logical dimensions DIMENSIONS := { 1; 2; 3 } diff --git a/cmake/configure/configure_20_trilinos.cmake b/cmake/configure/configure_20_trilinos.cmake index 65ba12e0e4..97fd5d9edd 100644 --- a/cmake/configure/configure_20_trilinos.cmake +++ b/cmake/configure/configure_20_trilinos.cmake @@ -481,6 +481,7 @@ macro(feature_trilinos_configure_external) if(${DEAL_II_TRILINOS_WITH_TPETRA}) if(DEAL_II_HAVE_TPETRA_INST_DOUBLE) + set(DEAL_II_EXPAND_TPETRA_TYPES "double") set(DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE "LinearAlgebra::TpetraWrappers::Vector") endif() if(DEAL_II_HAVE_TPETRA_INST_FLOAT) diff --git a/doc/news/changes/major/20231122SebastianKinnewig b/doc/news/changes/major/20231122SebastianKinnewig new file mode 100644 index 0000000000..c40b0062ee --- /dev/null +++ b/doc/news/changes/major/20231122SebastianKinnewig @@ -0,0 +1,5 @@ +New: LinearAlgebra::TpetraWrappers::SparseMatrix class +that implements a wrapper for Tpetra::CrsMatrix. +
+(Sebastian Kinnewig, 2023/11/22) + diff --git a/include/deal.II/base/template_constraints.h b/include/deal.II/base/template_constraints.h index f2c2f6e9fb..ba3f8fb1e8 100644 --- a/include/deal.II/base/template_constraints.h +++ b/include/deal.II/base/template_constraints.h @@ -670,7 +670,10 @@ namespace LinearAlgebra { template class Vector; - } + + template + class SparseMatrix; + } // namespace TpetraWrappers # endif } // namespace LinearAlgebra #endif diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h index fe265b8594..936a76437f 100644 --- a/include/deal.II/lac/affine_constraints.templates.h +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -47,6 +47,8 @@ #include #include #include +#include +#include #include #include diff --git a/include/deal.II/lac/trilinos_tpetra_sparse_matrix.h b/include/deal.II/lac/trilinos_tpetra_sparse_matrix.h new file mode 100644 index 0000000000..ce4acdc172 --- /dev/null +++ b/include/deal.II/lac/trilinos_tpetra_sparse_matrix.h @@ -0,0 +1,743 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2023 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_trilinos_tpetra_sparse_matrix_h +#define dealii_trilinos_tpetra_sparse_matrix_h + +#include + +#ifdef DEAL_II_TRILINOS_WITH_TPETRA + +# include +# include +# include + +# include +# include + +// Tpetra includes +# include +# include + + +DEAL_II_NAMESPACE_OPEN + +// forward declarations +# ifndef DOXYGEN +namespace LinearAlgebra +{ + namespace TpetraWrappers + { + template + class SparsityPattern; + } // namespace TpetraWrappers +} // namespace LinearAlgebra +# endif + +namespace LinearAlgebra +{ + + namespace TpetraWrappers + { + /** + * This class implements a wrapper to use the Trilinos distributed sparse + * matrix class + * Tpetra::CrsMatrix. + * This is precisely the kind of matrix we deal with all the time - we + * most likely get it from some assembly process, where also entries not + * locally owned might need to be written and hence need to be forwarded + * to the owner process. This class is designed to be used in a distributed + * memory architecture with an MPI compiler on the bottom, but it works + * equally well for serial processes. The only requirement for this class to + * work is that Trilinos has been installed with the same compiler as is + * used for generating deal.II. + * + * Moreover, this class takes an optional template argument for + * Kokkos::Nodes, allowing the usage of different Kokkos::Nodes. + * Kokkos allows the writing of portable applications targeting, + * for example, CUDA, OpenMP, Serial, or Threads, as backends for + * the execution and memory spaces. The backend is chosen by + * choosing the corresponding Kokkos Node. + * + * The interface of this class is modeled after the existing SparseMatrix + * class in deal.II. It has almost the same member functions and is often + * exchangeable. 
This class is templated and can be used with different + * scalar types. However, Trilinos need to be installed with complex support + * for usage with complex scalar types. + * + * @note You need to call SparseMatrix::compress() before you actually use + * the matrix. This calls + * Tpetra::fillComplete + * that compresses the storage format for sparse matrices by discarding + * unused elements and prepares the matrix for further usage + * (e.g., for matrix-vector products). + * However, to continue assembling the matrix, you need to call + * SparseMatrix::resume_fill() first. Once you finish modifying + * the matrix, you must call SparseMatrix::compress() again. + */ + template + class SparseMatrix : public Subscriptor + { + public: + /** + * Declare the type for container size. + */ + using size_type = dealii::types::global_dof_index; + + /** + * Declare an alias for the type used to store matrix elements, in analogy + * to all the other container classes. + */ + using value_type = Number; + + /** + * Typedef for Tpetra::CrsMatrix + */ + using MatrixType = + Tpetra::CrsMatrix; + + /** + * Typedef for Tpetra::Map + */ + using MapType = Tpetra::Map; + + /** + * Typedef for Tpetra::CrsGraph + */ + using GraphType = + Tpetra::CrsGraph; + + /** + * @name Constructors and initialization. + */ + /** @{ */ + /** + * Default constructor. Generates an empty (zero-size) matrix. + */ + SparseMatrix(); + + /** + * Generate a matrix from a TpetraWrappers::SparsityPattern object. + */ + SparseMatrix(const SparsityPattern &sparsity_pattern); + + /** + * Move constructor. Create a new sparse matrix by stealing the internal + * data of the `other` object. + */ + SparseMatrix(SparseMatrix &&other) noexcept; + + /** + * Copy constructor is deleted. + */ + SparseMatrix(const SparseMatrix &) = delete; + + /** + * operator= is deleted. + */ + SparseMatrix & + operator=(const SparseMatrix &) = delete; + + /** + * Move assignment operator. + */ + SparseMatrix & + operator=(SparseMatrix &&other) noexcept; + + /** + * Destructor. Made virtual so that one can use pointers to objects of + * this class. + */ + virtual ~SparseMatrix() override = default; + + /** + * This function initializes the Trilinos matrix with a deal.II sparsity + * pattern, i.e. it makes the underlying Trilinos Tpetra::CrsMatrix know + * the position of nonzero entries according to the sparsity pattern. This + * function is meant for use in serial programs, where there is no need to + * specify how the matrix is going to be distributed among different + * processors. This function works in %parallel, too, but it is + * recommended to manually specify the %parallel partitioning of the + * matrix using a Tpetra::Map. When run in %parallel, it is currently + * necessary that each processor holds the sparsity_pattern structure + * because each processor sets its rows. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. + */ + template + void + reinit(const SparsityPatternType &sparsity_pattern); + + /** + * This function reinitializes the Trilinos sparse matrix from a + * (possibly distributed) Trilinos sparsity pattern. It also works + * in parallel. In that case, the partitioning of the Trilinos + * sparsity pattern is used. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. 
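+     *
+     * A usage sketch (assuming the default template arguments; the
+     * DoFHandler object @p dof_handler and the variable names are
+     * placeholders, not part of this interface):
+     * @code
+     * const IndexSet locally_owned = dof_handler.locally_owned_dofs();
+     *
+     * // Build and finalize the distributed sparsity pattern first.
+     * LinearAlgebra::TpetraWrappers::SparsityPattern<> pattern(
+     *   locally_owned, MPI_COMM_WORLD);
+     * DoFTools::make_sparsity_pattern(dof_handler, pattern);
+     * pattern.compress();
+     *
+     * // Initialize the matrix from the finalized pattern.
+     * LinearAlgebra::TpetraWrappers::SparseMatrix<double> matrix;
+     * matrix.reinit(pattern);
+     * @endcode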
+ */ + void + reinit(const SparsityPattern &sparsity_pattern); + /** @} */ + + /** + * @name Constructors and initialization using an IndexSet description + */ + /** @{ */ + /** + * Constructor using an IndexSet and an MPI communicator to describe the + * %parallel partitioning. The parameter @p n_max_entries_per_row sets the + * number of nonzero entries in each row that will be allocated. Note that + * this number does not need to be exact, and it is even allowed that the + * actual matrix structure has more nonzero entries than specified in the + * constructor. However it is still advantageous to provide good estimates + * here since this will considerably increase the performance of the + * matrix setup. However, there is no effect in the performance of + * matrix-vector products, since Trilinos reorganizes the matrix memory + * prior to use (in the compress() step). + */ + SparseMatrix(const IndexSet ¶llel_partitioning, + const MPI_Comm communicator = MPI_COMM_WORLD, + const unsigned int n_max_entries_per_row = 0); + + /** + * Same as before, but now set the number of non-zero entries in each + * matrix row separately. Since we know the number of elements in the + * matrix exactly in this case, we can already allocate the right amount + * of memory, which makes the creation process including the insertion of + * nonzero elements by the respective SparseMatrix::reinit call + * considerably faster. + */ + SparseMatrix(const IndexSet ¶llel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row); + + /** + * This constructor is similar to the one above, but it now takes two + * different IndexSet partitions for row and columns. This interface is + * meant to be used for generating rectangular matrices, where the first + * index set describes the %parallel partitioning of the degrees of + * freedom associated with the matrix rows and the second one the + * partitioning of the matrix columns. The second index set specifies the + * partitioning of the vectors this matrix is to be multiplied with, not + * the distribution of the elements that actually appear in the matrix. + * + * The parameter @p n_max_entries_per_row defines how much memory will be + * allocated for each row. This number does not need to be accurate, as + * the structure is reorganized in the compress() call. + */ + SparseMatrix(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_max_entries_per_row = 0); + + /** + * Same as before, but now set the number of non-zero entries in each + * matrix row separately. Since we know the number of elements in the + * matrix exactly in this case, we can already allocate the right amount + * of memory, which makes the creation process including the insertion of + * nonzero elements by the respective SparseMatrix::reinit call + * considerably faster. + */ + SparseMatrix(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row); + + /** + * This function is initializes the Trilinos Tpetra matrix according to + * the specified @p sparsity_pattern, and also reassigns the matrix rows to + * different processes according to the user-supplied index set @p parallel_partitioning and + * %parallel communicator. 
In programs following the style of the tutorial + * programs, this function (and the respective call for a rectangular + * matrix) are the natural way to initialize the matrix size, its + * distribution among the MPI processes (if run in %parallel) as well as + * the location of non-zero elements. Trilinos stores the sparsity pattern + * internally, so it won't be needed any more after this call, in contrast + * to the deal.II own object. The optional argument @p exchange_data can + * be used for reinitialization with a sparsity pattern that is not fully + * constructed. If the flag is not set, each + * processor just sets the elements in the sparsity pattern that belong to + * its rows. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. + */ + template + void + reinit(const IndexSet ¶llel_partitioning, + const SparsityPatternType &sparsity_pattern, + const MPI_Comm communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + + /** + * This function is similar to the other initialization function above, + * but now also reassigns the matrix rows and columns according to two + * user-supplied index sets. To be used for rectangular matrices. The + * optional argument @p exchange_data can be used for reinitialization + * with a sparsity pattern that is not fully constructed. This feature is + * only implemented for input sparsity patterns of type + * DynamicSparsityPattern. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. + */ + template + void + reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityPatternType &sparsity_pattern, + const MPI_Comm communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + /** @} */ + + /** + * @name Information on the matrix + */ + /** @{ */ + /** + * Return the number of rows in this matrix. + */ + dealii::types::signed_global_dof_index + m() const; + + /** + * Return the number of columns in this matrix. + */ + dealii::types::signed_global_dof_index + n() const; + + + /** + * Return the local dimension of the matrix, i.e. the number of rows + * stored on the present MPI process. For sequential matrices, this number + * is the same as m(), but for %parallel matrices it may be smaller. + * + * To figure out which elements exactly are stored locally, use + * local_range(). + */ + unsigned int + local_size() const; + + /** + * Return a pair of indices indicating which rows of this matrix are + * stored locally. The first number is the index of the first row stored, + * the second the index of the one past the last one that is stored + * locally. If this is a sequential matrix, then the result will be the + * pair (0,m()), otherwise it will be a pair (i,i+n), where + * n=local_size(). + */ + std::pair + local_range() const; + + /** + * Return the total number of nonzero elements of this matrix (summed + * over all MPI processes). + */ + size_t + n_nonzero_elements() const; + + /** + * Return the state of the matrix, i.e., whether compress() needs to be + * called after an operation requiring data exchange. A call to compress() + * is also needed when the method set() has been called (even when working + * in serial). + */ + bool + is_compressed() const; + + /** + * Return the underlying MPI communicator. 
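+     * (Internally, this is the MPI communicator wrapped by the Teuchos
+     * communicator of the underlying Tpetra::CrsMatrix.)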
+ */ + MPI_Comm + get_mpi_communicator() const; + /** @} */ + + /** + * @name Modifying entries + */ + /** @{ */ + /** + * This operator assigns a scalar to a matrix. Since this does usually not + * make much sense (should we set all matrix entries to this value? Only + * the nonzero entries of the sparsity pattern?), this operation is only + * allowed if the actual value to be assigned is zero. This operator only + * exists to allow for the obvious notation matrix=0, which sets + * all elements of the matrix to zero, but keeps the sparsity pattern + * previously used. + */ + SparseMatrix & + operator=(const double d); + + /** + * Multiply the entire matrix by a fixed factor. + */ + SparseMatrix & + operator*=(const Number factor); + + /** + * Divide the entire matrix by a fixed factor. + */ + SparseMatrix & + operator/=(const Number factor); + + /** + * Add @p value to the element (i,j). + * Just as the respective call in deal.II SparseMatrix + * class. Moreover, if value is not a finite number an exception + * is thrown. + * + * @note When add is called on a compressed matrix, the matrix is set + * back to an uncompressed state. + */ + void + add(const size_type i, const size_type j, const TrilinosScalar value); + + /** + * Add an array of values given by values in the given global + * matrix row at columns specified by col_indices in the sparse matrix. + * Just as the respective call in deal.II SparseMatrix + * class. The optional parameter elide_zero_values can be used to + * specify whether zero values should be added anyway or these should be + * filtered away and only non-zero data is added. + * The default value is true, i.e., zero values won't be added + * into the matrix. + * + * @note When add is called on a compressed matrix, the matrix is set + * back to an uncompressed state. + */ + void + add(const size_type row, + const size_type n_cols, + const size_type *col_indices, + const TrilinosScalar *values, + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); + /** @} */ + + /** + * @name Multiplications + */ + /** @{ */ + /* + * Matrix-vector multiplication: let dst = M*src with M + * being this matrix. + * + * Source and destination must not be the same vector. + * + * The vector @p dst has to be initialized with the same IndexSet that was + * used for the row indices of the matrix and the vector @p src has to be + * initialized with the same IndexSet that was used for the column indices + * of the matrix. + */ + void + vmult(Vector &dst, const Vector &src) const; + + /* + * Matrix-vector multiplication: let dst = MT*src with + * M being this matrix. This function does the same as vmult() but + * takes the transposed matrix. + * + * Source and destination must not be the same vector. + */ + void + Tvmult(Vector &dst, const Vector &src) const; + + /** + * Adding matrix-vector multiplication. Add M*src on dst + * with M being this matrix. + * + * Source and destination must not be the same vector. + */ + void + vmult_add(Vector &dst, const Vector &src) const; + + + /** + * Adding matrix-vector multiplication. Add MT*src to + * dst with M being this matrix. This function does the same + * as vmult_add() but takes the transposed matrix. + * + * Source and destination must not be the same vector. + */ + void + Tvmult_add(Vector &dst, const Vector &src) const; + /** @} */ + + /** + * @name Mixed Stuff + */ + /** @{ */ + /** + * Print the matrix to the given stream, using the format (line,col) + * value, i.e. one nonzero entry of the matrix per line. 
The optional flag + * outputs the sparsity pattern in Trilinos style, where the data is + * sorted according to the processor number when printed to the stream, as + * well as a summary of the matrix like the global size. + */ + void + print(std::ostream &out, + const bool print_detailed_trilinos_information = false) const; + + /** + * This command does two things: + *
    + *
+      * • If the matrix was initialized without a sparsity pattern, elements
+      * have been added manually using the set() command. When this process
+      * is completed, a call to compress() reorganizes the internal data
+      * structures (sparsity pattern) so that fast access to data is possible
+      * in matrix-vector products.
+      * • If the matrix structure has already been fixed (either by
+      * initialization with a sparsity pattern or by calling compress()
+      * during the setup phase), this command does the %parallel exchange of
+      * data. This is necessary when we perform assembly on more than one
+      * (MPI) process, because then some non-local row data will accumulate
+      * on processes that contribute to rows actually owned by another
+      * process. This command is usually called after all elements have been
+      * traversed.
+      *
+ * + * In both cases, this function compresses the data structures and allows + * the resulting matrix to be used in all other operations like matrix- + * vector products. This is a collective operation, i.e., it needs to be + * run on all processors when used in %parallel. + * + * See + * @ref GlossCompress "Compressing distributed objects" + * for more information. + * + * @note The @p operation can be safely omitted, as that parameter is not + * used at all and is only present to ensure compatibility with other + * SparseMatrix classes. + */ + void + compress(VectorOperation::values operation); + + /** + * This function must be called to allow for changes to the structure + * of the matrix again after compress() was called. + * Once you are done modifying the matrix structure, you must call + * compress() again. + */ + void + resume_fill(); + + /** + * Return a const reference to the underlying Trilinos + * Tpetra::CrsMatrix + * class. + */ + const MatrixType & + trilinos_matrix() const; + + /** + * Return a (modifiable) reference to the underlying Trilinos + * Tpetra::CrsMatrix + * class. + */ + MatrixType & + trilinos_matrix(); + + /** + * Return a const + * Teuchos::RCP + * to the underlying Trilinos + * Tpetra::CrsMatrix + * class. + */ + Teuchos::RCP + trilinos_rcp() const; + + /** + * Return a (modifiable) + * Teuchos::RCP + * to the underlying Trilinos + * Tpetra::CrsMatrix + * class. + */ + Teuchos::RCP + trilinos_rcp(); + /** @} */ + + /** + * @addtogroup Exceptions + */ + /** @{ */ + + /** + * Exception + */ + DeclException0(ExcMatrixNotCompressed); + + /** + * Exception + */ + DeclExceptionMsg( + ExcSourceEqualsDestination, + "You are attempting an operation on two vectors that " + "are the same object, but the operation requires that the " + "two objects are in fact different."); + + /* + * Exception + */ + DeclExceptionMsg(ExcColMapMissmatch, + "The column partitioning of a matrix does not match " + "the partitioning of a vector you are trying to " + "multiply it with. Are you multiplying the " + "matrix with a vector that has ghost elements?"); + + /* + * Exception + */ + DeclExceptionMsg(ExcDomainMapMissmatch, + "The row partitioning of a matrix does not match " + "the partitioning of a vector you are trying to " + "put the result of a matrix-vector product in. " + "Are you trying to put the product of the " + "matrix with a vector into a vector that has " + "ghost elements?"); + /** @} */ + + private: + /** + * Pointer to the user-supplied Tpetra Trilinos mapping of the matrix + * columns that assigns parts of the matrix to the individual processes. + * + * @note The Trilinos matrix is row-oriented, and the row_space_map is + * therefore stored in the Trilinos matrix itself. The additional + * information from the column space map is used to speed up the + * assembly process. + */ + Teuchos::RCP column_space_map; + + /** + * A sparse matrix object in Trilinos to be used for finite element based + * problems which allows for assembling into non-local elements. The + * actual type, a sparse matrix, is set in the constructor. + */ + Teuchos::RCP matrix; + + /** + * A boolean variable to hold information on whether the matrix is + * fill complete or if the matrix is in compute mode. 
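+     * (This mirrors Tpetra's distinction between the "fill complete" and
+     * "fill active" states: compress() calls Tpetra's fillComplete(),
+     * while resume_fill() calls resumeFill().)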
+ */ + bool compressed; + + }; // class SparseMatrix + + + /* ------------------------- Inline functions ---------------------- */ + + template + inline void + SparseMatrix::add(const size_type i, + const size_type j, + const TrilinosScalar value) + { + add(i, 1, &j, &value, false); + } + + + + template + inline dealii::types::signed_global_dof_index + SparseMatrix::m() const + { + return matrix->getRowMap()->getGlobalNumElements(); + } + + + + template + inline dealii::types::signed_global_dof_index + SparseMatrix::n() const + { + // If the matrix structure has not been fixed (i.e., we did not have a + // sparsity pattern), it does not know about the number of columns, so we + // must always take this from the additional column space map + Assert(column_space_map.get() != nullptr, ExcInternalError()); + return column_space_map->getGlobalNumElements(); + } + + + + template + inline bool + SparseMatrix::is_compressed() const + { + return compressed; + } + + + + template + inline const Tpetra:: + CrsMatrix & + SparseMatrix::trilinos_matrix() const + { + return *matrix; + } + + + + template + inline Tpetra::CrsMatrix & + SparseMatrix::trilinos_matrix() + { + return *matrix; + } + + + + template + inline Teuchos::RCP< + const Tpetra::CrsMatrix> + SparseMatrix::trilinos_rcp() const + { + return matrix.getConst(); + } + + + + template + inline Teuchos::RCP< + Tpetra::CrsMatrix> + SparseMatrix::trilinos_rcp() + { + return matrix; + } + + } // namespace TpetraWrappers + +} // namespace LinearAlgebra + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_TRILINOS_WITH_TPETRA + +#endif // dealii_trilinos_tpetra_sparse_matrix_h diff --git a/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h b/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h new file mode 100644 index 0000000000..ddbde983fc --- /dev/null +++ b/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h @@ -0,0 +1,695 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 - 2023 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. 
+// +// --------------------------------------------------------------------- + +#ifndef dealii_trilinos_tpetra_sparse_matrix_templates_h +#define dealii_trilinos_tpetra_sparse_matrix_templates_h + +#include + +#ifdef DEAL_II_TRILINOS_WITH_TPETRA + +# include +# include +# include + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + + namespace TpetraWrappers + { + // reinit_matrix(): + namespace + { + using size_type = dealii::types::signed_global_dof_index; + + template + using MapType = + Tpetra::Map; + + template + using MatrixType = + Tpetra::CrsMatrix; + + template + using GraphType = + Tpetra::CrsGraph; + + template + void + reinit_matrix(const IndexSet &row_parallel_partitioning, + const IndexSet &column_parallel_partitioning, + const SparsityPatternType &sparsity_pattern, + const bool exchange_data, + const MPI_Comm communicator, + Teuchos::RCP> &column_space_map, + Teuchos::RCP> &matrix) + { + // release memory before reallocation + matrix.reset(); + + // Get the Tpetra::Maps + Teuchos::RCP> row_space_map = + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + + column_space_map = + column_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + + if (column_space_map->getComm()->getRank() == 0) + { + AssertDimension(sparsity_pattern.n_rows(), + row_parallel_partitioning.size()); + AssertDimension(sparsity_pattern.n_cols(), + column_parallel_partitioning.size()); + } + + // if we want to exchange data, build a usual Trilinos sparsity pattern + // and let that handle the exchange. otherwise, manually create a + // CrsGraph, which consumes considerably less memory because it can set + // correct number of indices right from the start + if (exchange_data) + { + SparsityPattern trilinos_sparsity; + trilinos_sparsity.reinit(row_parallel_partitioning, + column_parallel_partitioning, + sparsity_pattern, + communicator, + exchange_data); + matrix = Utilities::Trilinos::internal::make_rcp< + MatrixType>( + trilinos_sparsity.trilinos_sparsity_pattern()); + + return; + } + + IndexSet relevant_rows(sparsity_pattern.row_index_set()); + // serial case + if (relevant_rows.size() == 0) + { + relevant_rows.set_size(row_space_map->getGlobalNumElements()); + relevant_rows.add_range(0, row_space_map->getGlobalNumElements()); + } + relevant_rows.compress(); + + + std::vector ghost_rows; + Teuchos::Array n_entries_per_row( + row_space_map->getLocalNumElements()); + { + size_type own = 0; + for (const auto global_row : relevant_rows) + { + if (row_space_map->isNodeGlobalElement(global_row)) + n_entries_per_row[own++] = + sparsity_pattern.row_length(global_row); + } + } + + // The deal.II notation of a Sparsity pattern corresponds to the Tpetra + // concept of a Graph. Hence, we generate a graph by copying the + // sparsity pattern into it, and then build up the matrix from the + // graph. This is considerable faster than directly filling elements + // into the matrix. Moreover, it consumes less memory, since the + // internal reordering is done on ints only, and we can leave the + // doubles aside. + Teuchos::RCP> graph; + + graph = Utilities::Trilinos::internal::make_rcp>( + row_space_map, n_entries_per_row()); + + // This functions assumes that the sparsity pattern sits on all + // processors (completely). The parallel version uses a Tpetra graph + // that is already distributed. 
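+      // Consequently, each process below walks through all rows listed in
+      // relevant_rows and registers their column indices with the graph.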
+ + // now insert the indices + std::vector row_indices; + + for (const auto global_row : relevant_rows) + { + const int row_length = sparsity_pattern.row_length(global_row); + if (row_length == 0) + continue; + + row_indices.resize(row_length, -1); + for (size_type col = 0; col < row_length; ++col) + row_indices[col] = + sparsity_pattern.column_number(global_row, col); + + AssertIndexRange(global_row, row_space_map->getGlobalNumElements()); + graph->insertGlobalIndices(global_row, + row_length, + row_indices.data()); + } + + // Eventually, optimize the graph structure (sort indices, make memory + // contiguous, etc.). note that the documentation of the function indeed + // states that we first need to provide the column (domain) map and then + // the row (range) map + graph->fillComplete(column_space_map, row_space_map); + + // check whether we got the number of columns right. + AssertDimension(sparsity_pattern.n_cols(), graph->getGlobalNumCols()); + + // And now finally generate the matrix. + matrix = + Utilities::Trilinos::internal::make_rcp>( + graph); + } + } // namespace + + + + // Constructors and initialization: + + // The constructor is actually the only point where we have to check + // whether we build a serial or a parallel Trilinos matrix. + // Actually, it does not even matter how many threads there are, but + // only if we use an MPI compiler or a standard compiler. So, even one + // thread on a configuration with MPI will still get a parallel interface. + template + SparseMatrix::SparseMatrix() + : column_space_map(Utilities::Trilinos::internal::make_rcp( + 0, + 0, + Utilities::Trilinos::tpetra_comm_self())) + { + // Prepare the graph + Teuchos::RCP graph = + Utilities::Trilinos::internal::make_rcp(column_space_map, + column_space_map, + 0); + graph->fillComplete(); + + // Create the matrix from the graph + matrix = Utilities::Trilinos::internal::make_rcp(graph); + + compressed = false; + } + + + + template + SparseMatrix::SparseMatrix( + const SparsityPattern &sparsity_pattern) + : matrix(Utilities::Trilinos::internal::make_rcp( + sparsity_pattern.trilinos_sparsity_pattern())) + { + column_space_map = + Teuchos::rcp_const_cast(sparsity_pattern.domain_partitioner()); + compressed = false; + compress(VectorOperation::add); + } + + + + template + SparseMatrix::SparseMatrix( + SparseMatrix &&other) noexcept + : column_space_map(std::move(other.column_space_map)) + , matrix(std::move(other.matrix)) + , compressed(std::move(other.compressed)) + { + other.compressed = false; + } + + + + template + SparseMatrix & + SparseMatrix::operator=( + SparseMatrix &&other) noexcept + { + column_space_map = std::move(other.column_space_map); + matrix = std::move(other.matrix); + compressed = std::move(other.compressed); + + return *this; + } + + + + template + template + void + SparseMatrix::reinit( + const SparsityPatternType &sparsity_pattern) + { + reinit_matrix( + complete_index_set(sparsity_pattern.n_rows()), + complete_index_set(sparsity_pattern.n_cols()), + sparsity_pattern, + false, + MPI_COMM_SELF, + column_space_map, + matrix); + + compressed = false; + compress(VectorOperation::add); + } + + + + template + void + SparseMatrix::reinit( + const SparsityPattern &sparsity_pattern) + { + column_space_map.reset(); + matrix.reset(); + + // reinit with a (distributed) Trilinos sparsity pattern. 
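+    // The Tpetra::CrsGraph held by the sparsity pattern already stores the
+    // distributed layout, so the matrix can be built directly on top of it.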
+ column_space_map = + Teuchos::rcp_const_cast(sparsity_pattern.domain_partitioner()); + matrix = Utilities::Trilinos::internal::make_rcp( + sparsity_pattern.trilinos_sparsity_pattern()); + + compressed = false; + compress(VectorOperation::add); + } + + + + // Constructors and initialization using an IndexSet description: + + template + SparseMatrix::SparseMatrix( + const IndexSet ¶llel_partitioning, + const MPI_Comm communicator, + const unsigned int n_max_entries_per_row) + : column_space_map( + parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + , matrix(Utilities::Trilinos::internal::make_rcp( + column_space_map, + n_max_entries_per_row)) + , compressed(false) + {} + + + + template + SparseMatrix::SparseMatrix( + const IndexSet ¶llel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row) + : column_space_map( + parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + , compressed(false) + { + Teuchos::Array n_entries_per_row_array(n_entries_per_row.begin(), + n_entries_per_row.end()); + matrix = Utilities::Trilinos::internal::make_rcp( + column_space_map, n_entries_per_row_array()); + } + + + + template + SparseMatrix::SparseMatrix( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const size_type n_max_entries_per_row) + : column_space_map( + col_parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + , matrix(Utilities::Trilinos::internal::make_rcp( + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false), + n_max_entries_per_row)) + , compressed(false) + {} + + + + template + SparseMatrix::SparseMatrix( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row) + : column_space_map( + col_parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + , compressed(false) + { + Teuchos::Array n_entries_per_row_array(n_entries_per_row.begin(), + n_entries_per_row.end()); + matrix = Utilities::Trilinos::internal::make_rcp( + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false), + n_entries_per_row_array()); + } + + + + template + template + inline void + SparseMatrix::reinit( + const IndexSet ¶llel_partitioning, + const SparsityPatternType &sparsity_pattern, + const MPI_Comm communicator, + const bool exchange_data) + { + reinit(parallel_partitioning, + parallel_partitioning, + sparsity_pattern, + communicator, + exchange_data); + } + + + + template + template + void + SparseMatrix::reinit( + const IndexSet &row_parallel_partitioning, + + const IndexSet &col_parallel_partitioning, + const SparsityPatternType &sparsity_pattern, + const MPI_Comm communicator, + const bool exchange_data) + { + reinit_matrix( + row_parallel_partitioning, + col_parallel_partitioning, + sparsity_pattern, + exchange_data, + communicator, + column_space_map, + matrix); + + compressed = false; + compress(VectorOperation::add); + } + + + + // Information on the matrix + + template + inline unsigned int + SparseMatrix::local_size() const + { + return matrix->getLocalNumRows(); + } + + + + template + inline std::pair::size_type, + typename SparseMatrix::size_type> + SparseMatrix::local_range() const + { + size_type begin, end; + begin = matrix->getRowMap()->getMinLocalIndex(); + end = matrix->getRowMap()->getMaxLocalIndex() + 1; + + return std::make_pair(begin, end); + } + + + + template + inline size_t + SparseMatrix::n_nonzero_elements() const + { + 
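+    // Tpetra keeps this count accumulated over all MPI processes, so the
+    // global number of entries can be returned directly.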
return matrix->getGlobalNumEntries(); + } + + + + template + MPI_Comm + SparseMatrix::get_mpi_communicator() const + { + return Utilities::Trilinos::teuchos_comm_to_mpi_comm(matrix->getComm()); + } + + + + // Modifying entries + + template + SparseMatrix & + SparseMatrix::operator=(const double d) + { + (void)d; + Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue()); + + if (compressed) + { + matrix->resumeFill(); + compressed = false; + } + + // As checked above, we are only allowed to use d==0.0, so pass + // a constant zero (instead of a run-time value 'd' that *happens* to + // have a zero value) to the underlying class in hopes that the compiler + // can optimize this somehow. + matrix->setAllToScalar(/*d=*/0.0); + + return *this; + } + + + + template + SparseMatrix & + SparseMatrix::operator*=(const Number a) + { + matrix->scale(a); + return *this; + } + + + + template + SparseMatrix & + SparseMatrix::operator/=(const Number a) + { + Assert(a != 0, ExcDivideByZero()); + + const Number factor = 1.0 / a; + matrix->scale(factor); + return *this; + } + + + + template + void + SparseMatrix::add(const size_type row, + const size_type n_cols, + const size_type *col_indices, + const TrilinosScalar *values, + const bool elide_zero_values, + const bool /*col_indices_are_sorted*/) + { + AssertIndexRange(row, this->m()); + + // If the matrix is marked as compressed, we need to + // call resumeFill() first. + if (compressed || matrix->isFillComplete()) + { + matrix->resumeFill(); + compressed = false; + } + + // count zero entries; + const size_t n_zero_entries = + (elide_zero_values ? std::count(values, values + n_cols, Number(0)) : + 0); + + // Exit early if there is nothing to do + if (n_zero_entries == n_cols) + return; + + // Convert the input into Teuchos::Array + Teuchos::Array col_indices_array( + n_cols - n_zero_entries); + Teuchos::Array values_array(n_cols - n_zero_entries); + if (elide_zero_values) + { + size_t n_columns = 0; + for (size_t i = 0; i < n_cols; ++i) + { + // skip all zero entries, while filling the + if (values[i] != 0) + { + AssertIsFinite(values[i]); + AssertIndexRange(col_indices[i], n()); + AssertIndexRange(n_columns, n_zero_entries); + col_indices_array[n_columns] = col_indices[i]; + values_array[n_columns] = values[i]; + ++n_columns; + } + } + } + else + for (size_t i = 0; i < n_cols; ++i) + { + AssertIsFinite(values[i]); + AssertIndexRange(col_indices[i], n()); + col_indices_array[i] = col_indices[i]; + values_array[i] = values[i]; + } + + // Sum the values into the global matrix. 
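+    // Note that sumIntoGlobalValues() only modifies entries that already
+    // exist in the sparsity pattern: with a static graph, column indices
+    // that are not present are ignored rather than inserted.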
+ matrix->sumIntoGlobalValues(row, col_indices_array, values_array); + } + + + + // Multiplications + + template + void + SparseMatrix::vmult(Vector &dst, + const Vector &src) const + { + Assert(&src != &dst, ExcSourceEqualsDestination()); + Assert(matrix->isFillComplete(), ExcMatrixNotCompressed()); + Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()), + ExcColMapMissmatch()); + Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()), + ExcDomainMapMissmatch()); + matrix->apply(*src.trilinos_rcp(), *dst.trilinos_rcp()); + } + + + + template + void + SparseMatrix::Tvmult(Vector &dst, + const Vector &src) const + { + Assert(&src != &dst, ExcSourceEqualsDestination()); + Assert(matrix->isFillComplete(), ExcMatrixNotCompressed()); + Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()), + ExcColMapMissmatch()); + Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()), + ExcDomainMapMissmatch()); + matrix->apply(*src.trilinos_rcp(), *dst.trilinos_rcp(), Teuchos::TRANS); + } + + + + template + void + SparseMatrix::vmult_add(Vector &dst, + const Vector &src) const + { + Assert(&src != &dst, ExcSourceEqualsDestination()); + Assert(matrix->isFillComplete(), ExcMatrixNotCompressed()); + Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()), + ExcColMapMissmatch()); + Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()), + ExcDomainMapMissmatch()); + matrix->apply(*src.trilinos_rcp(), + *dst.trilinos_rcp(), + Teuchos::NO_TRANS, + Teuchos::ScalarTraits::one(), + Teuchos::ScalarTraits::one()); + } + + + + template + void + SparseMatrix::Tvmult_add(Vector &dst, + const Vector &src) const + { + Assert(&src != &dst, ExcSourceEqualsDestination()); + Assert(matrix->isFillComplete(), ExcMatrixNotCompressed()); + Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()), + ExcColMapMissmatch()); + Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()), + ExcDomainMapMissmatch()); + matrix->apply(*src.trilinos_rcp(), + *dst.trilinos_rcp(), + Teuchos::TRANS, + Teuchos::ScalarTraits::one(), + Teuchos::ScalarTraits::one()); + } + + + template + void + SparseMatrix::print( + std::ostream &out, + const bool print_detailed_trilinos_information) const + { + if (print_detailed_trilinos_information) + { + auto teuchos_out = Teuchos::getFancyOStream(Teuchos::rcpFromRef(out)); + matrix->describe(*teuchos_out, Teuchos::VERB_EXTREME); + } + else + { + typename MatrixType::values_host_view_type values; + typename MatrixType::local_inds_host_view_type indices; + + for (size_t i = 0; i < matrix->getLocalNumRows(); ++i) + { + matrix->getLocalRowView(i, indices, values); + + for (size_t j = 0; j < indices.size(); ++j) + out << "(" << matrix->getRowMap()->getGlobalElement(i) << "," + << matrix->getColMap()->getGlobalElement(indices[j]) << ") " + << values[j] << std::endl; + } + } + + AssertThrow(out.fail() == false, ExcIO()); + } + + + + template + void + SparseMatrix::compress( + [[maybe_unused]] VectorOperation::values operation) + { + if (!compressed) + { + matrix->fillComplete(column_space_map, matrix->getRowMap()); + compressed = true; + } + } + + template + void + SparseMatrix::resume_fill() + { + if (compressed) + { + matrix->resumeFill(); + compressed = false; + } + } + + } // namespace TpetraWrappers + +} // namespace LinearAlgebra + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_TRILINOS_WITH_TPETRA + +#endif // dealii_trilinos_tpetra_sparse_matrix_templates_h diff --git 
a/include/deal.II/lac/trilinos_tpetra_sparsity_pattern.h b/include/deal.II/lac/trilinos_tpetra_sparsity_pattern.h new file mode 100644 index 0000000000..de710eb24f --- /dev/null +++ b/include/deal.II/lac/trilinos_tpetra_sparsity_pattern.h @@ -0,0 +1,1385 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2023 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_trilinos_tpetra_sparsity_pattern_h +#define dealii_trilinos_tpetra_sparsity_pattern_h + +#include + +#ifdef DEAL_II_TRILINOS_WITH_TPETRA + +# include +# include +# include + +# include +# include +# include + +# include + +# include +# include +# include + + +DEAL_II_NAMESPACE_OPEN + +// forward declarations +# ifndef DOXYGEN +class DynamicSparsityPattern; + +namespace LinearAlgebra +{ + namespace TpetraWrappers + { + template + class SparsityPattern; + + template + class SparseMatrix; + + namespace SparsityPatternIterators + { + template + class Iterator; + } + } // namespace TpetraWrappers +} // namespace LinearAlgebra +# endif + +namespace LinearAlgebra +{ + namespace TpetraWrappers + { + namespace SparsityPatternIterators + { + /** + * Accessor class for iterators into sparsity patterns. This class is also + * the base class for both const and non-const accessor classes into + * sparse matrices. + * + * Note that this class only allows read access to elements, providing + * their row and column number. It does not allow modifying the sparsity + * pattern itself. + * + * @ingroup TrilinosWrappers + */ + template + class Accessor + { + public: + /** + * Declare type for container size. + */ + using size_type = dealii::types::signed_global_dof_index; + + /** + * Constructor. + */ + Accessor(const SparsityPattern *sparsity_pattern, + const size_type row, + const size_type index); + + /** + * Row number of the element represented by this object. + */ + size_type + row() const; + + /** + * Index in row of the element represented by this object. + */ + size_type + index() const; + + /** + * Column number of the element represented by this object. + */ + size_type + column() const; + + /** + * Exception + */ + DeclException0(ExcBeyondEndOfSparsityPattern); + + /** + * Exception + */ + DeclException3(ExcAccessToNonlocalRow, + size_type, + size_type, + size_type, + << "You tried to access row " << arg1 + << " of a distributed sparsity pattern, " + << " but only rows " << arg2 << " through " << arg3 + << " are stored locally and can be accessed."); + + private: + /** + * The matrix accessed. + */ + SparsityPattern *sparsity_pattern; + + /** + * Current row number. + */ + size_type a_row; + + /** + * Current index in row. + */ + size_type a_index; + + /** + * Cache where we store the column indices of the present row. This is + * necessary, since Trilinos makes access to the elements of its + * matrices rather hard, and it is much more efficient to copy all + * column entries of a row once when we enter it than repeatedly asking + * Trilinos for individual ones. 
This also makes some sense since it is + * likely that we will access them sequentially anyway. + * + * In order to make copying of iterators/accessor of acceptable + * performance, we keep a shared pointer to these entries so that more + * than one accessor can access this data if necessary. + */ + std::shared_ptr> + colnum_cache; + + /** + * Discard the old row caches (they may still be used by other + * accessors) and generate new ones for the row pointed to presently by + * this accessor. + */ + void + visit_present_row(); + + // Make enclosing class a friend. + friend class Iterator; + }; + + /** + * Iterator class for sparsity patterns of type + * TrilinosWrappers::SparsityPattern. Access to individual elements of the + * sparsity pattern is handled by the Accessor class in this namespace. + */ + template + class Iterator + { + public: + /** + * Declare type for container size. + */ + using size_type = size_t; + + /** + * Constructor. Create an iterator into the matrix @p matrix for the + * given row and the index within it. + */ + Iterator(const SparsityPattern *sparsity_pattern, + const size_type row, + const size_type index); + + /** + * Copy constructor. + */ + Iterator(const Iterator &i); + + /** + * Prefix increment. + */ + Iterator & + operator++(); + + /** + * Postfix increment. + */ + Iterator + operator++(int); + + /** + * Dereferencing operator. + */ + const Accessor & + operator*() const; + + /** + * Dereferencing operator. + */ + const Accessor * + operator->() const; + + /** + * Comparison. True, if both iterators point to the same matrix + * position. + */ + bool + operator==(const Iterator &) const; + + /** + * Inverse of ==. + */ + bool + operator!=(const Iterator &) const; + + /** + * Comparison operator. Result is true if either the first row number is + * smaller or if the row numbers are equal and the first index is + * smaller. + */ + bool + operator<(const Iterator &) const; + + /** + * Exception + */ + DeclException2(ExcInvalidIndexWithinRow, + size_type, + size_type, + << "Attempt to access element " << arg2 << " of row " + << arg1 << " which doesn't have that many elements."); + + private: + /** + * Store an object of the accessor class. + */ + Accessor accessor; + + friend class TpetraWrappers::SparsityPattern; + }; + + } // namespace SparsityPatternIterators + + + /** + * This class implements a wrapper class to use the Trilinos distributed + * sparsity pattern class Tpetra::CrsGraph. This class is designed to be + * used for construction of %parallel Trilinos matrices. The functionality + * of this class is modeled after the existing sparsity pattern classes, + * with the difference that this class can work fully in %parallel according + * to a partitioning of the sparsity pattern rows. + * + * This class has many similarities to the DynamicSparsityPattern, since it + * can dynamically add elements to the pattern without any memory being + * previously reserved for it. However, it also has a method + * SparsityPattern::compress(), that finalizes the pattern and + * enables its use with Trilinos sparse matrices. + * + * @ingroup TrilinosWrappers + * @ingroup Sparsity + */ + template + class SparsityPattern : public SparsityPatternBase + { + public: + /** + * Declare type for container size. + */ + using size_type = dealii::types::signed_global_dof_index; + + /** + * Declare an alias for the iterator class. 
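+     *
+     * A traversal sketch (assuming the begin()/end() pair familiar from
+     * the other deal.II sparsity pattern classes):
+     * @code
+     * for (auto it = pattern.begin(); it != pattern.end(); ++it)
+     *   std::cout << '(' << it->row() << ',' << it->column() << ")\n";
+     * @endcode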
+ */ + using const_iterator = SparsityPatternIterators::Iterator; + + /** + * Typedef for Tpetra::Map + */ + using MapType = + Tpetra::Map; + + /** + * Typedef for Tpetra:Graph + */ + using GraphType = + Tpetra::CrsGraph; + + /** + * @name Basic constructors and initialization + */ + /** @{ */ + /** + * Default constructor. Generates an empty (zero-size) sparsity pattern. + */ + SparsityPattern(); + + /** + * Generate a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely + * stored locally, too. + * + * It is possible to specify the number of columns entries per row using + * the optional @p n_entries_per_row argument. However, this value does + * not need to be accurate or even given at all, since one does usually + * not have this kind of information before building the sparsity pattern + * (the usual case when the function DoFTools::make_sparsity_pattern() is + * called). The entries are allocated dynamically in a similar manner as + * for the deal.II DynamicSparsityPattern classes. However, a good + * estimate will reduce the setup time of the sparsity pattern. + */ + SparsityPattern(const size_type m, + const size_type n, + const size_type n_entries_per_row = 0); + + /** + * Generate a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely + * stored locally, too. + * + * The vector n_entries_per_row specifies the number of entries + * in each row (an information usually not available, though). + */ + SparsityPattern(const size_type m, + const size_type n, + const std::vector &n_entries_per_row); + + /** + * Move constructor. Create a new sparse matrix by stealing the internal + * data. + */ + SparsityPattern(SparsityPattern &&other) noexcept; + + /** + * Copy constructor. Sets the calling sparsity pattern to be the same as + * the input sparsity pattern. + */ + SparsityPattern(const SparsityPattern &input_sparsity_pattern); + + /** + * Destructor. Made virtual so that one can use pointers to this class. + */ + virtual ~SparsityPattern() override = default; + + /** + * Initialize a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely + * stored locally. + * + * The number of columns entries per row is specified as the maximum + * number of entries argument. This does not need to be an accurate + * number since the entries are allocated dynamically in a similar manner + * as for the deal.II DynamicSparsityPattern classes, but a good estimate + * will reduce the setup time of the sparsity pattern. + */ + void + reinit(const size_type m, + const size_type n, + const size_type n_entries_per_row = 0); + + /** + * Initialize a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely + * stored locally. + * + * The vector n_entries_per_row specifies the number of entries + * in each row. + */ + void + reinit(const size_type m, + const size_type n, + const std::vector &n_entries_per_row); + + /** + * Copy function. Sets the calling sparsity pattern to be the same as the + * input sparsity pattern. + */ + void + copy_from(const SparsityPattern &input_sparsity_pattern); + + /** + * Copy function from one of the deal.II sparsity patterns. If used in + * parallel, this function uses an ad-hoc partitioning of the rows and + * columns. 
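+     *
+     * A sketch of the intended use (assuming the default template
+     * arguments; @p n_dofs is a placeholder):
+     * @code
+     * DynamicSparsityPattern dsp(n_dofs, n_dofs);
+     * // ... add entries to dsp ...
+     * LinearAlgebra::TpetraWrappers::SparsityPattern<> pattern;
+     * pattern.copy_from(dsp);
+     * @endcode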
+ */ + template + void + copy_from(const SparsityPatternType &nontrilinos_sparsity_pattern); + + /** + * Copy operator. This operation is only allowed for empty objects, to + * avoid potentially very costly operations automatically synthesized by + * the compiler. Use copy_from() instead if you know that you really want + * to copy a sparsity pattern with non-trivial content. + */ + SparsityPattern & + operator=(const SparsityPattern &input_sparsity_pattern); + + /** + * Release all memory and return to a state just like after having called + * the default constructor. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. + */ + void + clear(); + + /** + * In analogy to our own SparsityPattern class, this function compresses + * the sparsity pattern and allows the resulting pattern to be used for + * actually generating a (Trilinos-based) matrix. This function also + * exchanges non-local data that might have accumulated during the + * addition of new elements. This function must therefore be called once + * the structure is fixed. This is a collective operation, i.e., it needs + * to be run on all processors when used in parallel. + */ + void + compress(); + /** @} */ + + /** + * @name Constructors and initialization using an IndexSet description + */ + /** @{ */ + + /** + * Constructor for a square sparsity pattern using an IndexSet and an MPI + * communicator for the description of the %parallel partitioning. + * Moreover, the number of nonzero entries in the rows of the sparsity + * pattern can be specified. Note that this number does not need to be + * exact, and it is even allowed that the actual sparsity structure has + * more nonzero entries than specified in the constructor. However it is + * still advantageous to provide good estimates here since a good value + * will avoid repeated allocation of memory, which considerably increases + * the performance when creating the sparsity pattern. + */ + SparsityPattern(const IndexSet ¶llel_partitioning, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * Same as before, but now use the exact number of nonzero entries in + * each m row. Since we know the number of elements in the sparsity + * pattern exactly in this case, we can already allocate the right amount + * of memory, which makes the creation process by the respective + * SparsityPattern::reinit call considerably faster. However, + * this is a rather unusual situation, since knowing the number of entries + * in each row is usually connected to knowing the indices of nonzero + * entries, which the sparsity pattern is designed to describe. + */ + SparsityPattern(const IndexSet ¶llel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row); + + /** + * This constructor is similar to the one above, but it now takes two + * different index sets to describe the %parallel partitioning of rows and + * columns. This interface is meant to be used for generating rectangular + * sparsity pattern. Note that there is no real parallelism along the + * columns – the processor that owns a certain row always owns all + * the column elements, no matter how far they might be spread out. The + * second Tpetra::Map is only used to specify the number of columns and + * for internal arrangements when doing matrix-vector products with + * vectors based on that column map. 
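+     *
+     * For example (a sketch; the two IndexSet objects and the communicator
+     * are placeholders):
+     * @code
+     * LinearAlgebra::TpetraWrappers::SparsityPattern<> pattern(
+     *   row_partitioning, col_partitioning, MPI_COMM_WORLD);
+     * @endcode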
+ * + * The number of columns entries per row is specified as the maximum + * number of entries argument. + */ + SparsityPattern(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * This constructor is similar to the one above, but it now takes two + * different index sets for rows and columns. This interface is meant to + * be used for generating rectangular matrices, where one map specifies + * the %parallel distribution of rows and the second one specifies the + * distribution of degrees of freedom associated with matrix columns. This + * second map is however not used for the distribution of the columns + * themselves – rather, all column elements of a row are stored on + * the same processor. The vector n_entries_per_row specifies the + * number of entries in each row of the newly generated matrix. + */ + SparsityPattern(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row); + + /** + * This constructor constructs general sparsity patterns, possible non- + * square ones. Constructing a sparsity pattern this way allows the user + * to explicitly specify the rows into which we are going to add elements. + * This set is required to be a superset of the first index set @p + * row_parallel_partitioning that includes also rows that are owned by + * another processor (ghost rows). Note that elements can only be added to + * rows specified by @p writable_rows. + * + * This method is beneficial when the rows to which a processor is going + * to write can be determined before actually inserting elements into the + * matrix. For the typical parallel::distributed::Triangulation class used + * in deal.II, we know that a processor only will add row elements for + * what we call the locally relevant dofs (see + * DoFTools::extract_locally_relevant_dofs). The other constructors + * methods use general Trilinos facilities that allow to add elements to + * arbitrary rows (as done by all the other reinit functions). However, + * this flexibility come at a cost, the most prominent being that adding + * elements into the same matrix from multiple threads in shared memory is + * not safe whenever MPI is used. For these settings, the current method + * is the one to choose: It will store the off-processor data as an + * additional sparsity pattern (that is then passed to the Trilinos matrix + * via the reinit method) which can be organized in such a way that + * thread-safety can be ensured (as long as the user makes sure to never + * write into the same matrix row simultaneously, of course). + */ + SparsityPattern(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writable_rows, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * Reinitialization function for generating a square sparsity pattern + * using an IndexSet and an MPI communicator for the description of the + * %parallel partitioning and the number of nonzero entries in the rows of + * the sparsity pattern. Note that this number does not need to be exact, + * and it is even allowed that the actual sparsity structure has more + * nonzero entries than specified in the constructor. 
However, it is still + * advantageous to provide good estimates here since this will + * considerably increase the performance when creating the sparsity + * pattern. + * + * This function does not create any entries by itself, but provides the + * correct data structures that can be used by the respective add() + * function. + */ + void + reinit(const IndexSet &parallel_partitioning, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * Same as before, but now use the exact number of nonzero entries in + * each row. Since we know the number of elements in the sparsity pattern + * exactly in this case, we can already allocate the right amount of + * memory, which makes the process of adding entries to the sparsity + * pattern considerably faster. However, this is a rather unusual + * situation, since knowing the number of entries in each row is usually + * connected to knowing the indices of nonzero entries, which the sparsity + * pattern is designed to describe. + */ + void + reinit(const IndexSet &parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row); + + /** + * This reinit function is similar to the one above, but it now takes two + * different index sets for rows and columns. This interface is meant to + * be used for generating rectangular sparsity patterns, where one index + * set describes the %parallel partitioning of the dofs associated with + * the sparsity pattern rows and the other one that of the sparsity pattern + * columns. Note that there is no real parallelism along the columns + * – the processor that owns a certain row always owns all the + * column elements, no matter how far they might be spread out. The second + * IndexSet is only used to specify the number of columns and for internal + * arrangements when doing matrix-vector products with vectors based on a + * Tpetra::Map built from that IndexSet. + * + * The number of column entries per row is specified by the argument + * n_entries_per_row. + */ + void + reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * This reinit function is used to specify general matrices, possibly non- + * square ones. In addition to the arguments of the other reinit method + * above, it allows the user to explicitly specify the rows into which we + * are going to add elements. This set is a superset of the first index + * set @p row_parallel_partitioning that also includes rows that are owned + * by another processor (ghost rows). + * + * This method is beneficial when the rows to which a processor is going + * to write can be determined before actually inserting elements into the + * matrix. For the typical parallel::distributed::Triangulation class used + * in deal.II, we know that a processor will only add row elements for + * what we call the locally relevant dofs (see + * DoFTools::extract_locally_relevant_dofs). Trilinos matrices allow + * adding elements to arbitrary rows, and this is what all the other + * reinit methods do, too. + * However, this flexibility comes at a cost, the most prominent being that + * adding elements into the same matrix from multiple threads in shared + * memory is not safe whenever MPI is used.
For these settings, the + * current method is the one to choose: It will store the off-processor + * data as an additional sparsity pattern (that is then passed to the + * Trilinos matrix via the reinit method) which can be organized in such a + * way that thread-safety can be ensured (as long as the user makes sure + * to never write into the same matrix row simultaneously, of course). + */ + void + reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writeable_rows, + const MPI_Comm communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * Same as before, but now using a vector n_entries_per_row for + * specifying the number of entries in each row of the sparsity pattern. + */ + void + reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row); + + /** + * Reinit function. Takes one of the deal.II sparsity patterns and the + * %parallel partitioning of the rows and columns specified by two index + * sets and a %parallel communicator for initializing the current Trilinos + * sparsity pattern. The optional argument @p exchange_data can be used + * for reinitialization with a sparsity pattern that is not fully + * constructed. This feature is only implemented for input sparsity + * patterns of type DynamicSparsityPattern. + */ + template + void + reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityPatternType &nontrilinos_sparsity_pattern, + const MPI_Comm communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + + /** + * Reinit function. Takes one of the deal.II sparsity patterns and a + * %parallel partitioning of the rows and columns for initializing the + * current Trilinos sparsity pattern. The optional argument @p + * exchange_data can be used for reinitialization with a sparsity pattern + * that is not fully constructed. This feature is only implemented for + * input sparsity patterns of type DynamicSparsityPattern. + */ + template + void + reinit(const IndexSet &parallel_partitioning, + const SparsityPatternType &nontrilinos_sparsity_pattern, + const MPI_Comm communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + /** @} */ + /** + * @name Information on the sparsity pattern + */ + /** @{ */ + + /** + * Return the state of the sparsity pattern, i.e., whether compress() + * needs to be called after an operation requiring data exchange. + */ + bool + is_compressed() const; + + /** + * Return the maximum number of entries per row on the current processor. + */ + unsigned int + max_entries_per_row() const; + + /** + * Return the local dimension of the sparsity pattern, i.e., the number of + * rows stored on the present MPI process. In the sequential case, this + * number is the same as n_rows(), but for parallel sparsity patterns it + * may be smaller. + * + * To figure out which elements exactly are stored locally, use + * local_range(). + */ + unsigned int + local_size() const; + + /** + * Return a pair of indices indicating which rows of this sparsity pattern + * are stored locally. The first number is the index of the first row + * stored, the second the index of the one past the last one that is + * stored locally. If this is a sequential sparsity pattern, then the + * result will be the pair (0,n_rows()), otherwise it will be a pair + * (i,i+n), where + * n=local_size().
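+ *
+ * A small sketch of the intended use, with @p sp an object of this
+ * class and process_row() a hypothetical helper:
+ * @code
+ * const auto range = sp.local_range();
+ * for (auto row = range.first; row < range.second; ++row)
+ *   process_row(row); // visits exactly the locally stored rows
+ * @endcode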
+ */ + std::pair + local_range() const; + + /** + * Return whether @p index is in the local range or not; see also + * local_range(). + */ + bool + in_local_range(const size_type index) const; + + /** + * Return the number of nonzero elements of this sparsity pattern. + */ + std::uint64_t + n_nonzero_elements() const; + + /** + * Return the number of entries in the given row. + * + * In a parallel context, the row in question may of course not be + * stored on the current processor, and in that case it is not + * possible to query the number of entries in it. The returned value + * is then `static_cast<size_type>(-1)`. + */ + size_type + row_length(const size_type row) const; + + /** + * Compute the bandwidth of the matrix represented by this structure. The + * bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ + * represents a nonzero entry of the matrix. Consequently, the maximum + * bandwidth an $n\times m$ matrix can have is $\max\{n-1,m-1\}$. + */ + size_type + bandwidth() const; + + /** + * Return whether the object is empty. It is empty if no memory is + * allocated, which is the same as when both dimensions are zero. + */ + bool + empty() const; + + /** + * Return whether the index (i,j) exists in the sparsity pattern + * (i.e., it may be nonzero) or not. + */ + bool + exists(const size_type i, const size_type j) const; + + /** + * Return whether a given @p row is stored in the current object + * on this process. + */ + bool + row_is_stored_locally(const size_type i) const; + + /** + * Determine an estimate for the memory consumption (in bytes) of this + * object. Currently not implemented for this class. + */ + std::size_t + memory_consumption() const; + + /** @} */ + /** + * @name Adding entries + */ + /** @{ */ + /** + * Add the element (i,j) to the sparsity pattern. + */ + void + add(const size_type i, const size_type j); + + + /** + * Add several elements in one row to the sparsity pattern. + */ + template + void + add_entries(const size_type row, + ForwardIterator begin, + ForwardIterator end, + const bool indices_are_sorted = false); + + virtual void + add_row_entries( + const dealii::types::global_dof_index &row, + const ArrayView &columns, + const bool indices_are_sorted = false) override; + + using SparsityPatternBase::add_entries; + + /** @} */ + /** + * @name Access of underlying Trilinos data + */ + /** @{ */ + + /** + * Return a Teuchos::RCP to the underlying Trilinos Tpetra::CrsGraph + * data that stores the sparsity pattern. + */ + Teuchos::RCP + trilinos_sparsity_pattern() const; + + /** + * Return a const Teuchos::RCP to the underlying Trilinos Tpetra::Map that + * sets the parallel partitioning of the domain space of this sparsity + * pattern, i.e., the partitioning of the vectors that matrices based on + * this sparsity pattern are multiplied with. + */ + Teuchos::RCP + domain_partitioner() const; + + /** + * Return a const Teuchos::RCP to the underlying Trilinos Tpetra::Map that + * sets the partitioning of the range space of this sparsity pattern, + * i.e., the partitioning of the vectors that result from matrix- + * vector products. + */ + Teuchos::RCP + range_partitioner() const; + + /** + * Return the underlying MPI communicator. + */ + MPI_Comm + get_mpi_communicator() const; + + /** + * Return the underlying Teuchos::MPI communicator.
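+ *
+ * For illustration: the returned Teuchos communicator wraps the same
+ * set of processes as the plain MPI communicator returned by
+ * get_mpi_communicator(). With @p sp an object of this class, one
+ * would therefore expect (a sketch):
+ * @code
+ * Assert(sp.get_teuchos_mpi_communicator()->getSize() ==
+ *          static_cast<int>(Utilities::MPI::n_mpi_processes(
+ *            sp.get_mpi_communicator())),
+ *        ExcInternalError());
+ * @endcode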
+ */ + Teuchos::RCP> + get_teuchos_mpi_communicator() const; + + /** @} */ + + /** + * @name Partitioners + */ + /** @{ */ + + /** + * Return the partitioning of the domain space of this pattern, i.e., the + * partitioning of the vectors a matrix based on this sparsity pattern has + * to be multiplied with. + */ + IndexSet + locally_owned_domain_indices() const; + + /** + * Return the partitioning of the range space of this pattern, i.e., the + * partitioning of the vectors that result from matrix-vector + * products with a matrix based on this pattern. + */ + IndexSet + locally_owned_range_indices() const; + + /** @} */ + + /** + * @name Iterators + */ + /** @{ */ + + /** + * Iterator starting at the first entry. + */ + const_iterator + begin() const; + + /** + * Final iterator. + */ + const_iterator + end() const; + + /** + * Iterator starting at the first entry of row @p r. + * + * Note that if the given row is empty, i.e., does not contain any nonzero + * entries, then the iterator returned by this function equals + * end(r). Note also that the iterator may not be dereferenceable + * in that case. + */ + const_iterator + begin(const size_type r) const; + + /** + * Final iterator of row r. It points to the first element past + * the end of line @p r, or past the end of the entire sparsity pattern. + * + * Note that the end iterator is not necessarily dereferenceable. This is + * in particular the case if it is the end iterator for the last row of a + * matrix. + */ + const_iterator + end(const size_type r) const; + + /** @} */ + /** + * @name Input/Output + */ + /** @{ */ + + /** + * Print (the locally owned part of) the sparsity pattern to the given + * stream, using the format (line,col). The optional flag outputs + * the sparsity pattern in Trilinos style, where the owning + * processor number is also printed to the stream, as well as a summary + * before actually writing the entries. + */ + void + print(std::ostream &out, + const bool write_extended_trilinos_info = false) const; + + /** + * Print the sparsity of the matrix in a format that gnuplot + * understands and which can be used to plot the sparsity pattern in a + * graphical way. The format consists of pairs i j of nonzero + * elements, each representing one entry of this matrix, one per line of + * the output file. Indices are counted from zero on, as usual. Since + * sparsity patterns are printed in the same way as matrices are + * displayed, we print the negative of the row index, which means that + * the (0,0) element is in the top left rather than in the bottom + * left corner. + * + * Print the sparsity pattern in gnuplot by setting the data style to dots + * or points and using the plot command.
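+ *
+ * For example (a sketch, with @p sp an object of this class):
+ * @code
+ * std::ofstream out("sparsity.gpl");
+ * sp.print_gnuplot(out);
+ * @endcode
+ * followed, inside gnuplot, by something like
+ * <tt>set style data points; plot "sparsity.gpl"</tt>.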
+ */ + void + print_gnuplot(std::ostream &out) const; + + /** @} */ + /** + * @addtogroup Exceptions + * @{ + */ + /** + * Exception + */ + DeclException1(ExcTrilinosError, + int, + << "An error with error number " << arg1 + << " occurred while calling a Trilinos function"); + + /** + * Exception + */ + DeclException2(ExcInvalidIndex, + size_type, + size_type, + << "The entry with index <" << arg1 << ',' << arg2 + << "> does not exist."); + + /** + * Exception + */ + DeclException4(ExcAccessToNonLocalElement, + size_type, + size_type, + size_type, + size_type, + << "You tried to access element (" << arg1 << '/' << arg2 + << ')' + << " of a distributed matrix, but only rows in range [" + << arg3 << ',' << arg4 + << "] are stored locally and can be accessed."); + + /** + * Exception + */ + DeclException2(ExcAccessToNonPresentElement, + size_type, + size_type, + << "You tried to access element (" << arg1 << '/' << arg2 + << ')' << " of a sparse matrix, but it appears to not" + << " exist in the Trilinos sparsity pattern."); + /** @} */ + private: + /** + * Teuchos::RCP to the user-supplied Tpetra map of the matrix + * columns that assigns parts of the matrix to the individual processes. + */ + Teuchos::RCP column_space_map; + + /** + * A sparsity pattern object in Trilinos to be used for finite element + * based problems which allows for adding non-local elements to the + * pattern. + */ + Teuchos::RCP graph; + + /** + * A sparsity pattern object for the non-local part of the sparsity + * pattern that is going to be sent to the owning processor. Only used + * when the particular constructor or reinit method with a writable_rows + * argument is used. + */ + Teuchos::RCP nonlocal_graph; + + // TODO: currently only for double + friend class SparseMatrix; + friend class SparsityPatternIterators::Accessor; + friend class SparsityPatternIterators::Iterator; + }; + + + + // ---------------- inline and template functions ----------------- + + +# ifndef DOXYGEN + + namespace SparsityPatternIterators + { + template + inline Accessor::Accessor(const SparsityPattern *sp, + const size_type row, + const size_type index) + : sparsity_pattern(const_cast *>(sp)) + , a_row(row) + , a_index(index) + { + visit_present_row(); + } + + + + template + inline typename Accessor::size_type + Accessor::row() const + { + Assert(a_row < sparsity_pattern->n_rows(), + ExcBeyondEndOfSparsityPattern()); + return a_row; + } + + + + template + inline typename Accessor::size_type + Accessor::column() const + { + Assert(a_row < sparsity_pattern->n_rows(), + ExcBeyondEndOfSparsityPattern()); + return (*colnum_cache)[a_index]; + } + + + + template + inline typename Accessor::size_type + Accessor::index() const + { + Assert(a_row < sparsity_pattern->n_rows(), + ExcBeyondEndOfSparsityPattern()); + return a_index; + } + + + + template + inline Iterator::Iterator(const SparsityPattern *sp, + const size_type row, + const size_type index) + : accessor(sp, row, index) + {} + + + + template + inline Iterator::Iterator(const Iterator &) = default; + + + + template + inline Iterator & + Iterator::operator++() + { + Assert(accessor.a_row < accessor.sparsity_pattern->n_rows(), + ExcIteratorPastEnd()); + + ++accessor.a_index; + + // If at end of line: do one step, then cycle until we find a row with a + // nonzero number of entries that is stored locally.
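+ // (Rows that are not stored locally have no column data cached on this
+ // process, so the loop below skips them in the same way as empty rows;
+ // visit_present_row() then rebuilds the cache for the row we stop at.)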
+ if (accessor.a_index >= + static_cast( + accessor.colnum_cache->size())) + { + accessor.a_index = 0; + ++accessor.a_row; + + while (accessor.a_row < + static_cast( + accessor.sparsity_pattern->n_rows())) + { + const auto row_length = + accessor.sparsity_pattern->row_length(accessor.a_row); + if (row_length == 0 || + !accessor.sparsity_pattern->row_is_stored_locally( + accessor.a_row)) + ++accessor.a_row; + else + break; + } + + accessor.visit_present_row(); + } + return *this; + } + + + + template + inline Iterator + Iterator::operator++(int) + { + const Iterator old_state = *this; + ++(*this); + return old_state; + } + + + + template + inline const Accessor & + Iterator::operator*() const + { + return accessor; + } + + + + template + inline const Accessor * + Iterator::operator->() const + { + return &accessor; + } + + + + template + inline bool + Iterator::operator==(const Iterator &other) const + { + return (accessor.a_row == other.accessor.a_row && + accessor.a_index == other.accessor.a_index); + } + + + + template + inline bool + Iterator::operator!=(const Iterator &other) const + { + return !(*this == other); + } + + + + template + inline bool + Iterator::operator<(const Iterator &other) const + { + return (accessor.row() < other.accessor.row() || + (accessor.row() == other.accessor.row() && + accessor.index() < other.accessor.index())); + } + + } // namespace SparsityPatternIterators + + + + template + inline typename SparsityPattern::const_iterator + SparsityPattern::begin() const + { + const size_type first_valid_row = this->local_range().first; + return const_iterator(this, first_valid_row, 0); + } + + + + template + inline typename SparsityPattern::const_iterator + SparsityPattern::end() const + { + return const_iterator(this, n_rows(), 0); + } + + + + template + inline typename SparsityPattern::const_iterator + SparsityPattern::begin(const size_type r) const + { + AssertIndexRange(r, n_rows()); + if (row_length(r) > 0) + return const_iterator(this, r, 0); + else + return end(r); + } + + + + template + inline typename SparsityPattern::const_iterator + SparsityPattern::end(const size_type r) const + { + AssertIndexRange(r, n_rows()); + + // place the iterator on the first entry + // past this line, or at the end of the + // matrix + for (size_type i = r + 1; + i < static_cast(n_rows()); + ++i) + if (row_length(i) > 0) + return const_iterator(this, i, 0); + + // if there is no such line, then take the + // end iterator of the matrix + return end(); + } + + + + template + inline bool + SparsityPattern::in_local_range(const size_type index) const + { + const TrilinosWrappers::types::int_type begin = + graph->getRowMap()->getMinGlobalIndex(); + const TrilinosWrappers::types::int_type end = + graph->getRowMap()->getMaxGlobalIndex() + 1; + + return ((index >= static_cast(begin)) && + (index < static_cast(end))); + } + + + + template + inline bool + SparsityPattern::is_compressed() const + { + return graph->isFillComplete(); + } + + + + template + inline bool + SparsityPattern::empty() const + { + return ((n_rows() == 0) && (n_cols() == 0)); + } + + + + template + inline void + SparsityPattern::add(const size_type i, const size_type j) + { + add_entries(i, &j, &j + 1); + } + + + + template + template + inline void + SparsityPattern::add_entries(const size_type row, + ForwardIterator begin, + ForwardIterator end, + const bool /*indices_are_sorted*/) + { + if (begin == end) + return; + + // verify that the size of the data type Trilinos expects matches the size + // of the type the iterator points to.
we allow for some slippage between signed and + // unsigned and only compare that they are both either 32 or 64 bit. to + // write this test properly, note that we cannot compare the size of + // '*begin' because 'begin' may be an iterator and '*begin' may be an + // accessor class. consequently, we need to somehow get an actual value + // from it, which we can do by evaluating an expression such as + // multiplying the value by 2 + Assert(sizeof(TrilinosWrappers::types::int_type) == sizeof((*begin) * 2), + ExcNotImplemented()); + + const TrilinosWrappers::types::int_type *col_index_ptr_begin = + reinterpret_cast( + const_cast::type *>(&*begin)); + + const TrilinosWrappers::types::int_type *col_index_ptr_end = + reinterpret_cast( + const_cast::type *>(&*end)); + + // Check at least for the first index that the conversion actually works + AssertDimension(*col_index_ptr_begin, *begin); + AssertDimension(*col_index_ptr_end, *end); + TrilinosWrappers::types::int_type trilinos_row_index = row; + + Teuchos::Array array(col_index_ptr_begin, col_index_ptr_end); + + if (row_is_stored_locally(row)) + graph->insertGlobalIndices(trilinos_row_index, array()); + else if (nonlocal_graph.get() != nullptr) + { + // this is the case when we have explicitly set the off-processor rows + // and want to create a separate matrix object for them (to retain + // thread-safety) + Assert(nonlocal_graph->getRowMap()->getLocalElement(row) != + Teuchos::OrdinalTraits< + dealii::types::signed_global_dof_index>::invalid(), + ExcMessage("Attempted to write into off-processor matrix row " + "that has not been specified as being writable upon " + "initialization")); + nonlocal_graph->insertGlobalIndices(trilinos_row_index, array); + } + else + graph->insertGlobalIndices(trilinos_row_index, array); + } + + + + template + inline Teuchos::RCP< + Tpetra::CrsGraph> + SparsityPattern::trilinos_sparsity_pattern() const + { + return graph; + } + + + + template + inline IndexSet + SparsityPattern::locally_owned_domain_indices() const + { + return IndexSet(graph->getDomainMap().getConst()); + } + + + + template + inline IndexSet + SparsityPattern::locally_owned_range_indices() const + { + return IndexSet(graph->getRangeMap().getConst()); + } + +# endif // DOXYGEN + } // namespace TpetraWrappers + +} // namespace LinearAlgebra + + +DEAL_II_NAMESPACE_CLOSE + + +#endif // DEAL_II_TRILINOS_WITH_TPETRA + +#endif diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt index a3a8a53315..207197951e 100644 --- a/source/lac/CMakeLists.txt +++ b/source/lac/CMakeLists.txt @@ -144,6 +144,8 @@ if(DEAL_II_WITH_TRILINOS) trilinos_sparse_matrix.cc trilinos_sparsity_pattern.cc trilinos_tpetra_communication_pattern.cc + trilinos_tpetra_sparse_matrix.cc + trilinos_tpetra_sparsity_pattern.cc trilinos_tpetra_vector.cc trilinos_vector.cc ) diff --git a/source/lac/affine_constraints.inst.in b/source/lac/affine_constraints.inst.in index 3dad71cdd1..e3665527a8 100644 --- a/source/lac/affine_constraints.inst.in +++ b/source/lac/affine_constraints.inst.in @@ -238,6 +238,27 @@ for (S : REAL_AND_COMPLEX_SCALARS) MatrixBlock> &) const; } +// --------------------------------------------------------------------- +// +// Tpetra: +// +// --------------------------------------------------------------------- + +for (S : TRILINOS_SCALARS) + { + template void AffineConstraints::distribute_local_to_global< + LinearAlgebra::TpetraWrappers::SparseMatrix, + LinearAlgebra::TpetraWrappers::Vector>( + const FullMatrix &, + const Vector &, + const
std::vector::size_type> &, + LinearAlgebra::TpetraWrappers::SparseMatrix &, + LinearAlgebra::TpetraWrappers::Vector &, + bool, + std::integral_constant) const; + } + + // --------------------------------------------------------------------- // diff --git a/source/lac/trilinos_tpetra_sparse_matrix.cc b/source/lac/trilinos_tpetra_sparse_matrix.cc new file mode 100644 index 0000000000..6e5ce3fcc3 --- /dev/null +++ b/source/lac/trilinos_tpetra_sparse_matrix.cc @@ -0,0 +1,46 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2023 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#include + +#ifdef DEAL_II_TRILINOS_WITH_TPETRA + +# include + +DEAL_II_NAMESPACE_OPEN + +# ifndef DOXYGEN +// explicit instantiations +namespace LinearAlgebra +{ + namespace TpetraWrappers + { + template class SparseMatrix; + + template void + SparseMatrix::reinit( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const dealii::DynamicSparsityPattern &sparsity_pattern, + const MPI_Comm communicator, + const bool exchange_data); + + } // namespace TpetraWrappers +} // namespace LinearAlgebra +# endif // DOXYGEN + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_TRILINOS_WITH_TPETRA diff --git a/source/lac/trilinos_tpetra_sparsity_pattern.cc b/source/lac/trilinos_tpetra_sparsity_pattern.cc new file mode 100644 index 0000000000..31b01f8430 --- /dev/null +++ b/source/lac/trilinos_tpetra_sparsity_pattern.cc @@ -0,0 +1,1049 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2023 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. 
+// +// --------------------------------------------------------------------- + +#include + +#ifdef DEAL_II_TRILINOS_WITH_TPETRA + +# include +# include + +# include +# include +# include +# include + +# include + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + + namespace TpetraWrappers + { + namespace SparsityPatternIterators + { + template + void + Accessor::visit_present_row() + { + // if we are asked to visit the past-the-end line, then simply + // release all our caches and go on with life + if (static_cast(this->a_row) == sparsity_pattern->n_rows()) + { + colnum_cache.reset(); + return; + } + + // otherwise first flush Trilinos caches if necessary + if (!sparsity_pattern->is_compressed()) + sparsity_pattern->compress(); + + colnum_cache = + std::make_shared>( + sparsity_pattern->row_length(this->a_row)); + + if (colnum_cache->size() > 0) + { + // get a representation of the present row + std::size_t ncols; + typename Tpetra::CrsGraph< + int, + dealii::types::signed_global_dof_index, + NodeType>::nonconst_global_inds_host_view_type + column_indices_view(colnum_cache->data(), colnum_cache->size()); + sparsity_pattern->graph->getGlobalRowCopy(this->a_row, + column_indices_view, + ncols); + AssertThrow(ncols == colnum_cache->size(), ExcInternalError()); + } + } + } // namespace SparsityPatternIterators + + + // The constructor is actually the only point where we have to check whether + // we build a serial or a parallel Trilinos matrix. Actually, it does not + // even matter how many threads there are, but only if we use an MPI + // compiler or a standard compiler. So, even one thread on a configuration + // with MPI will still get a parallel interface. + template + SparsityPattern::SparsityPattern() + { + column_space_map = Utilities::Trilinos::internal::make_rcp( + TrilinosWrappers::types::int_type(0), + TrilinosWrappers::types::int_type(0), + Utilities::Trilinos::tpetra_comm_self()); + graph = + Utilities::Trilinos::internal::make_rcp(column_space_map, + column_space_map, + 0); + graph->fillComplete(); + } + + + + template + SparsityPattern::SparsityPattern( + const size_type m, + const size_type n, + const size_type n_entries_per_row) + { + reinit(m, n, n_entries_per_row); + } + + + + template + SparsityPattern::SparsityPattern( + const size_type m, + const size_type n, + const std::vector &n_entries_per_row) + { + reinit(m, n, n_entries_per_row); + } + + + + template + SparsityPattern::SparsityPattern( + SparsityPattern &&other) noexcept + : SparsityPatternBase(std::move(other)) + , column_space_map(std::move(other.column_space_map)) + , graph(std::move(other.graph)) + , nonlocal_graph(std::move(other.nonlocal_graph)) + {} + + + + // Copy function only works if the sparsity pattern is empty. 
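+ // (A deep copy of a filled Tpetra::CrsGraph would be an expensive,
+ // collective operation, so only the empty state may be copied; this is
+ // asserted in the constructor body.)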
+ template + SparsityPattern::SparsityPattern( + const SparsityPattern &input_sparsity) + : SparsityPatternBase(input_sparsity) + , column_space_map(Utilities::Trilinos::internal::make_rcp( + 0, + 0, + Utilities::Trilinos::tpetra_comm_self())) + , graph( + Utilities::Trilinos::internal::make_rcp(column_space_map, + column_space_map, + 0)) + { + (void)input_sparsity; + Assert(input_sparsity.n_rows() == 0, + ExcMessage( + "Copy constructor only works for empty sparsity patterns.")); + } + + + + template + SparsityPattern::SparsityPattern( + const IndexSet &parallel_partitioning, + const MPI_Comm communicator, + const size_type n_entries_per_row) + { + reinit(parallel_partitioning, + parallel_partitioning, + communicator, + n_entries_per_row); + } + + + + template + SparsityPattern::SparsityPattern( + const IndexSet &parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row) + { + reinit(parallel_partitioning, + parallel_partitioning, + communicator, + n_entries_per_row); + } + + + + template + SparsityPattern::SparsityPattern( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const size_type n_entries_per_row) + { + reinit(row_parallel_partitioning, + col_parallel_partitioning, + communicator, + n_entries_per_row); + } + + + + template + SparsityPattern::SparsityPattern( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row) + { + reinit(row_parallel_partitioning, + col_parallel_partitioning, + communicator, + n_entries_per_row); + } + + + + template + SparsityPattern::SparsityPattern( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writable_rows, + const MPI_Comm communicator, + const size_type n_max_entries_per_row) + { + reinit(row_parallel_partitioning, + col_parallel_partitioning, + writable_rows, + communicator, + n_max_entries_per_row); + } + + + + template + void + SparsityPattern::reinit(const size_type m, + const size_type n, + const size_type n_entries_per_row) + { + reinit(complete_index_set(m), + complete_index_set(n), + MPI_COMM_SELF, + n_entries_per_row); + } + + + + template + void + SparsityPattern::reinit( + const size_type m, + const size_type n, + const std::vector &n_entries_per_row) + { + reinit(complete_index_set(m), + complete_index_set(n), + MPI_COMM_SELF, + n_entries_per_row); + } + + + + namespace + { + template + using size_type = typename SparsityPattern::size_type; + + template + using MapType = + Tpetra::Map; + + template + using GraphType = + Tpetra::CrsGraph; + + template + void + reinit_sp(const Teuchos::RCP> &row_map, + const Teuchos::RCP> &col_map, + const size_type n_entries_per_row, + Teuchos::RCP> &column_space_map, + Teuchos::RCP> &graph, + Teuchos::RCP> &nonlocal_graph) + { + Assert(row_map->isOneToOne(), + ExcMessage("Row map must be 1-to-1, i.e., no overlap between " + "the maps of different processors.")); + Assert(col_map->isOneToOne(), + ExcMessage("Column map must be 1-to-1, i.e., no overlap between " + "the maps of different processors.")); + + nonlocal_graph.reset(); + graph.reset(); + column_space_map = col_map; + + // for more than one processor, need to specify only row map first and + // let the matrix entries decide about the column map (which says which + // columns are present in the matrix, not to be confused with the + // col_map that tells how the domain dofs of the matrix
will be + // distributed). for only one processor, we can directly assign the + // columns as well. If we use a recent Trilinos version, we can also + // require building a non-local graph which gives us thread-safe + // initialization. + graph = Utilities::Trilinos::internal::make_rcp>( + row_map, row_map, n_entries_per_row); + } + + + + template + void + reinit_sp(const Teuchos::RCP> &row_map, + const Teuchos::RCP> &col_map, + const std::vector> &n_entries_per_row, + Teuchos::RCP> &column_space_map, + Teuchos::RCP> &graph, + Teuchos::RCP> &nonlocal_graph) + { + Assert(row_map->isOneToOne(), + ExcMessage("Row map must be 1-to-1, i.e., no overlap between " + "the maps of different processors.")); + Assert(col_map->isOneToOne(), + ExcMessage("Column map must be 1-to-1, i.e., no overlap between " + "the maps of different processors.")); + + // release memory before reallocation + nonlocal_graph.reset(); + graph.reset(); + AssertDimension(n_entries_per_row.size(), + row_map->getGlobalNumElements()); + + column_space_map = col_map; + + // Translate the vector of row lengths into one that only stores + // those entries that relate to the locally stored rows of the matrix: + Kokkos::DualView local_entries_per_row( + "local_entries_per_row", + row_map->getMaxGlobalIndex() - row_map->getMinGlobalIndex()); + + auto local_entries_per_row_host = + local_entries_per_row.view(); + + std::uint64_t total_size = 0; + for (unsigned int i = 0; i < local_entries_per_row.extent(0); ++i) + { + local_entries_per_row_host(i) = + n_entries_per_row[row_map->getMinGlobalIndex() + i]; + total_size += local_entries_per_row_host[i]; + } + local_entries_per_row.modify(); + local_entries_per_row.sync(); + + AssertThrow( + total_size < static_cast( + std::numeric_limits< + dealii::types::signed_global_dof_index>::max()), + ExcMessage( + "You are requesting to store more elements than the global ordinal type allows.")); + + graph = Utilities::Trilinos::internal::make_rcp>( + row_map, col_map, local_entries_per_row); + } + + + + template + void + reinit_sp(const Teuchos::RCP> &row_map, + const Teuchos::RCP> &col_map, + const SparsityPatternType &sp, + [[maybe_unused]] const bool exchange_data, + Teuchos::RCP> &column_space_map, + Teuchos::RCP> &graph, + Teuchos::RCP> &nonlocal_graph) + { + nonlocal_graph.reset(); + graph.reset(); + + AssertDimension(sp.n_rows(), row_map->getGlobalNumElements()); + AssertDimension(sp.n_cols(), col_map->getGlobalNumElements()); + + column_space_map = + Utilities::Trilinos::internal::make_rcp>(*col_map); + + Assert(row_map->isContiguous() == true, + ExcMessage( + "This function only works if the row map is contiguous.")); + + const size_type first_row = row_map->getMinGlobalIndex(), + last_row = row_map->getMaxGlobalIndex() + 1; + Teuchos::Array n_entries_per_row(last_row - first_row); + + for (size_type row = first_row; row < last_row; ++row) + n_entries_per_row[row - first_row] = sp.row_length(row); + + AssertThrow( + std::accumulate(n_entries_per_row.begin(), + n_entries_per_row.end(), + std::uint64_t(0)) < + static_cast(std::numeric_limits::max()), + ExcMessage( + "The TrilinosWrappers use Tpetra internally, and " + "Trilinos/Tpetra was compiled with 'local ordinal = int'. " + "Therefore, 'signed int' is used to represent local indices, " + "and only 2,147,483,647 nonzero matrix entries can be stored " + "on a single process, but you are requesting more than " + "that.
Either use more MPI processes or recompile Trilinos " + "with 'local ordinal = long long'.")); + + if (row_map->getComm()->getSize() > 1) + graph = Utilities::Trilinos::internal::make_rcp>( + row_map, n_entries_per_row()); + else + graph = Utilities::Trilinos::internal::make_rcp>( + row_map, col_map, n_entries_per_row()); + + AssertDimension(sp.n_rows(), graph->getGlobalNumRows()); + AssertDimension(sp.n_cols(), col_map->getGlobalNumElements()); + + std::vector row_indices; + + for (size_type row = first_row; row < last_row; ++row) + { + const TrilinosWrappers::types::int_type row_length = + sp.row_length(row); + if (row_length == 0) + continue; + + row_indices.resize(row_length, -1); + { + typename SparsityPatternType::iterator p = sp.begin(row); + // avoid incrementing p over the end of the current row because + // it is slow for DynamicSparsityPattern in parallel + for (int col = 0; col < row_length;) + { + row_indices[col++] = p->column(); + if (col < row_length) + ++p; + } + } + graph->insertGlobalIndices(row, row_length, row_indices.data()); + } + + graph->globalAssemble(); + } + } // namespace + + + template + void + SparsityPattern::reinit(const IndexSet &parallel_partitioning, + const MPI_Comm communicator, + const size_type n_entries_per_row) + { + SparsityPatternBase::resize(parallel_partitioning.size(), + parallel_partitioning.size()); + Teuchos::RCP map = + parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp( + map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph); + } + + + + template + void + SparsityPattern::reinit( + const IndexSet &parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row) + { + SparsityPatternBase::resize(parallel_partitioning.size(), + parallel_partitioning.size()); + Teuchos::RCP map = + parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp( + map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph); + } + + + + template + void + SparsityPattern::reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const size_type n_entries_per_row) + { + SparsityPatternBase::resize(row_parallel_partitioning.size(), + col_parallel_partitioning.size()); + Teuchos::RCP row_map = + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + Teuchos::RCP col_map = + col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp(row_map, + col_map, + n_entries_per_row, + column_space_map, + graph, + nonlocal_graph); + } + + + + template + void + SparsityPattern::reinit( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm communicator, + const std::vector &n_entries_per_row) + { + SparsityPatternBase::resize(row_parallel_partitioning.size(), + col_parallel_partitioning.size()); + Teuchos::RCP row_map = + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + Teuchos::RCP col_map = + col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp(row_map, + col_map, + n_entries_per_row, + column_space_map, + graph, + nonlocal_graph); + } + + + + template + void + SparsityPattern::reinit(const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writable_rows, + const MPI_Comm communicator, + const size_type n_entries_per_row) + { + SparsityPatternBase::resize(row_parallel_partitioning.size(), + col_parallel_partitioning.size()); + Teuchos::RCP
row_map = + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + Teuchos::RCP col_map = + col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp(row_map, + col_map, + n_entries_per_row, + column_space_map, + graph, + nonlocal_graph); + + IndexSet nonlocal_partitioner = writable_rows; + AssertDimension(nonlocal_partitioner.size(), + row_parallel_partitioning.size()); +# ifdef DEBUG + { + IndexSet tmp = writable_rows & row_parallel_partitioning; + Assert(tmp == row_parallel_partitioning, + ExcMessage( + "The set of writable rows passed to this method does not " + "contain the locally owned rows, which is not allowed.")); + } +# endif + nonlocal_partitioner.subtract_set(row_parallel_partitioning); + if (Utilities::MPI::n_mpi_processes(communicator) > 1) + { + Teuchos::RCP nonlocal_map = + nonlocal_partitioner.make_tpetra_map_rcp(communicator, true); + nonlocal_graph = + Utilities::Trilinos::internal::make_rcp(nonlocal_map, + col_map, + 0); + } + else + Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError()); + } + + + + template + template + void + SparsityPattern::reinit( + const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityPatternType &nontrilinos_sparsity_pattern, + const MPI_Comm communicator, + const bool exchange_data) + { + SparsityPatternBase::resize(row_parallel_partitioning.size(), + col_parallel_partitioning.size()); + Teuchos::RCP row_map = + row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + Teuchos::RCP col_map = + col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp(row_map, + col_map, + nontrilinos_sparsity_pattern, + exchange_data, + column_space_map, + graph, + nonlocal_graph); + } + + + + template + template + void + SparsityPattern::reinit( + const IndexSet &parallel_partitioning, + const SparsityPatternType &nontrilinos_sparsity_pattern, + const MPI_Comm communicator, + const bool exchange_data) + { + AssertDimension(nontrilinos_sparsity_pattern.n_rows(), + parallel_partitioning.size()); + AssertDimension(nontrilinos_sparsity_pattern.n_cols(), + parallel_partitioning.size()); + SparsityPatternBase::resize(parallel_partitioning.size(), + parallel_partitioning.size()); + Teuchos::RCP map = + parallel_partitioning.make_tpetra_map_rcp(communicator, false); + reinit_sp(map, + map, + nontrilinos_sparsity_pattern, + exchange_data, + column_space_map, + graph, + nonlocal_graph); + } + + + + template + SparsityPattern & + SparsityPattern::operator=(const SparsityPattern &) + { + Assert(false, ExcNotImplemented()); + return *this; + } + + + + template + void + SparsityPattern::copy_from(const SparsityPattern &sp) + { + SparsityPatternBase::resize(sp.n_rows(), sp.n_cols()); + column_space_map = + Utilities::Trilinos::internal::make_rcp(*sp.column_space_map); + graph = Utilities::Trilinos::internal::make_rcp(*sp.graph); + + if (sp.nonlocal_graph.get() != nullptr) + nonlocal_graph = Utilities::Trilinos::internal::make_rcp( + *sp.nonlocal_graph); + else + nonlocal_graph.reset(); + } + + + + template + template + void + SparsityPattern::copy_from(const SparsityPatternType &sp) + { + SparsityPatternBase::resize(sp.n_rows(), sp.n_cols()); + Teuchos::RCP rows = + Utilities::Trilinos::internal::make_rcp( + TrilinosWrappers::types::int_type(sp.n_rows()), + 0, + Utilities::Trilinos::tpetra_comm_self()); + Teuchos::RCP columns = + Utilities::Trilinos::internal::make_rcp( + TrilinosWrappers::types::int_type(sp.n_cols()), + 0, +
Utilities::Trilinos::tpetra_comm_self()); + + reinit_sp( + rows, columns, sp, false, column_space_map, graph, nonlocal_graph); + } + + + + template + void + SparsityPattern::clear() + { + SparsityPatternBase::resize(0, 0); + // When we clear the sparsity pattern, reset + // the pointers and generate an + // empty pattern. + column_space_map = Utilities::Trilinos::internal::make_rcp( + TrilinosWrappers::types::int_type(0), + TrilinosWrappers::types::int_type(0), + Utilities::Trilinos::tpetra_comm_self()); + graph = + Utilities::Trilinos::internal::make_rcp(column_space_map, + column_space_map, + 0); + graph->fillComplete(); + + nonlocal_graph.reset(); + } + + + + template + void + SparsityPattern::compress() + { + Assert(column_space_map.get(), ExcInternalError()); + if (nonlocal_graph.get() != nullptr) + { + if (nonlocal_graph->getRowMap()->getLocalNumElements() > 0 && + column_space_map->getGlobalNumElements() > 0) + { + // Insert dummy element at (row, column) that corresponds to row 0 + // in local index counting. + TrilinosWrappers::types::int_type row = + nonlocal_graph->getRowMap()->getGlobalElement(0); + TrilinosWrappers::types::int_type column = 0; + + // in case we have a square sparsity pattern, add the entry on the + // diagonal + if (column_space_map->getGlobalNumElements() == + graph->getRangeMap()->getGlobalNumElements()) + column = row; + // if not, take a column index that we have ourselves since we + // know for sure it is there (and it will not create spurious + // messages to many ranks like putting index 0 on many processors) + else if (column_space_map->getLocalNumElements() > 0) + column = column_space_map->getGlobalElement(0); + nonlocal_graph->insertGlobalIndices(row, 1, &column); + } + Assert(nonlocal_graph->getRowMap()->getLocalNumElements() == 0 || + column_space_map->getGlobalNumElements() == 0, + ExcInternalError()); + + nonlocal_graph->fillComplete(column_space_map, graph->getRangeMap()); + graph->fillComplete(column_space_map, graph->getRangeMap()); + } + else + { + graph->globalAssemble(); + } + + // Check consistency between the sizes set at the beginning and what + // Trilinos stores: + using namespace deal_II_exceptions::internals; + Assert(compare_for_equality(n_rows(), graph->getGlobalNumRows()), + ExcInternalError()); + Assert(compare_for_equality(n_cols(), + column_space_map->getGlobalNumElements()), + ExcInternalError()); + } + + + + template + bool + SparsityPattern::row_is_stored_locally(const size_type i) const + { + return graph->getRowMap()->getLocalElement(i) != + Teuchos::OrdinalTraits::invalid(); + } + + + + template + bool + SparsityPattern::exists(const size_type i, + const size_type j) const + { + if (!row_is_stored_locally(i)) + return false; + + // Extract local indices in the matrix. + const auto trilinos_i = graph->getRowMap()->getLocalElement(i); + const auto trilinos_j = graph->getColMap()->getLocalElement(j); + + typename GraphType::local_inds_host_view_type col_indices; + + // Generate the view.
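+ // (This fills a host-side view of the local column indices of the row
+ // without copying the underlying graph data.)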
+ graph->getLocalRowView(trilinos_i, col_indices); + + // Search for the index + const size_type local_col_index = + std::find(col_indices.data(), + col_indices.data() + col_indices.size(), + trilinos_j) - + col_indices.data(); + + return static_cast(local_col_index) != col_indices.size(); + } + + + + template + typename SparsityPattern::size_type + SparsityPattern::bandwidth() const + { + size_type local_b = 0; + for (int i = 0; i < static_cast(local_size()); ++i) + { + typename GraphType::local_inds_host_view_type indices; + graph->getLocalRowView(i, indices); + const auto num_entries = indices.size(); + for (unsigned int j = 0; j < static_cast(num_entries); + ++j) + { + if (static_cast(std::abs(i - indices[j])) > local_b) + local_b = std::abs(i - indices[j]); + } + } + + TrilinosWrappers::types::int_type global_b = + Utilities::MPI::max(local_b, + Utilities::Trilinos::teuchos_comm_to_mpi_comm( + graph->getComm())); + return static_cast(global_b); + } + + + + template + unsigned int + SparsityPattern::local_size() const + { + return graph->getLocalNumRows(); + } + + + + template + std::pair::size_type, + typename SparsityPattern::size_type> + SparsityPattern::local_range() const + { + const size_type begin = graph->getRowMap()->getMinGlobalIndex(); + const size_type end = graph->getRowMap()->getMaxGlobalIndex() + 1; + + return {begin, end}; + } + + + + template + std::uint64_t + SparsityPattern::n_nonzero_elements() const + { + return graph->getGlobalNumEntries(); + } + + + + template + unsigned int + SparsityPattern::max_entries_per_row() const + { + return graph->getLocalMaxNumRowEntries(); + } + + + + template + typename SparsityPattern::size_type + SparsityPattern::row_length(const size_type row) const + { + Assert(row < (size_type)n_rows(), ExcInternalError()); + + // Get a representation of where the present row is located on + // the current processor + TrilinosWrappers::types::int_type local_row = + graph->getRowMap()->getLocalElement(row); + + // On the processor that owns this row, we'll have a non-negative + // value for `local_row` and can ask for the length of the row. + if (local_row >= 0) + return graph->getNumEntriesInLocalRow(local_row); + else + return static_cast(-1); + } + + + + template + void + SparsityPattern::add_row_entries( + const dealii::types::global_dof_index &row, + const ArrayView &columns, + const bool indices_are_sorted) + { + add_entries(row, columns.begin(), columns.end(), indices_are_sorted); + } + + + + template + Teuchos::RCP> + SparsityPattern::domain_partitioner() const + { + return graph->getDomainMap(); + } + + + + template + Teuchos::RCP> + SparsityPattern::range_partitioner() const + { + return graph->getRangeMap(); + } + + + + template + MPI_Comm + SparsityPattern::get_mpi_communicator() const + { + return Utilities::Trilinos::teuchos_comm_to_mpi_comm( + graph->getRangeMap()->getComm()); + } + + + + template + Teuchos::RCP> + SparsityPattern::get_teuchos_mpi_communicator() const + { + return graph->getRangeMap()->getComm(); + } + + + + // As of now, no particularly neat + // output is generated in case of + // multiple processors.
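+ // (Each process writes the (row,col) pairs of its locally stored rows,
+ // translated to global indices; in parallel, the output of different
+ // processes is not coordinated or merged.)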
+ template + void + SparsityPattern::print( + std::ostream &out, + const bool write_extended_trilinos_info) const + { + if (write_extended_trilinos_info) + out << *graph; + else + { + for (unsigned int i = 0; i < graph->getLocalNumRows(); ++i) + { + typename GraphType::local_inds_host_view_type indices; + graph->getLocalRowView(i, indices); + int num_entries = indices.size(); + for (int j = 0; j < num_entries; ++j) + out << "(" << graph->getRowMap()->getGlobalElement(i) << "," + << graph->getColMap()->getGlobalElement(indices[j]) << ") " + << std::endl; + } + } + + AssertThrow(out.fail() == false, ExcIO()); + } + + + + template + void + SparsityPattern::print_gnuplot(std::ostream &out) const + { + Assert(graph->isFillComplete() == true, ExcInternalError()); + for (dealii::types::signed_global_dof_index row = 0; row < local_size(); + ++row) + { + typename GraphType::local_inds_host_view_type indices; + graph->getLocalRowView(row, indices); + int num_entries = indices.size(); + + Assert(num_entries >= 0, ExcInternalError()); + // avoid sign comparison warning + const dealii::types::signed_global_dof_index num_entries_ = + num_entries; + for (dealii::types::signed_global_dof_index j = 0; j < num_entries_; + ++j) + // while matrix entries are usually + // written (i,j), with i vertical and + // j horizontal, gnuplot output is + // x-y, that is we have to exchange + // the order of output + out << static_cast( + graph->getColMap()->getGlobalElement(indices[j])) + << " " + << -static_cast(graph->getRowMap()->getGlobalElement(row)) + << std::endl; + } + + AssertThrow(out.fail() == false, ExcIO()); + } + + // TODO: Implement! + template + std::size_t + SparsityPattern::memory_consumption() const + { + Assert(false, ExcNotImplemented()); + return 0; + } + + +# ifndef DOXYGEN + // explicit instantiations + template class SparsityPattern< + Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>; + + template void + SparsityPattern:: + copy_from(const dealii::SparsityPattern &); + template void + SparsityPattern:: + copy_from(const dealii::DynamicSparsityPattern &); + + template void + SparsityPattern:: + reinit(const IndexSet &, + const dealii::SparsityPattern &, + const MPI_Comm, + bool); + template void + SparsityPattern:: + reinit(const IndexSet &, + const dealii::DynamicSparsityPattern &, + const MPI_Comm, + bool); + + + template void + SparsityPattern:: + reinit(const IndexSet &, + const IndexSet &, + const dealii::SparsityPattern &, + const MPI_Comm, + bool); + template void + SparsityPattern:: + reinit(const IndexSet &, + const IndexSet &, + const dealii::DynamicSparsityPattern &, + const MPI_Comm, + bool); +# endif + + } // namespace TpetraWrappers + +} // namespace LinearAlgebra + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_TRILINOS_WITH_TPETRA