parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension>;
parallel::fullydistributed::Triangulation<deal_II_dimension, deal_II_space_dimension>; }
+TRILINOS_SCALARS := { @DEAL_II_EXPAND_TPETRA_TYPES@; }
+
// all supported logical dimensions
DIMENSIONS := { 1; 2; 3 }
if(${DEAL_II_TRILINOS_WITH_TPETRA})
if(DEAL_II_HAVE_TPETRA_INST_DOUBLE)
+ set(DEAL_II_EXPAND_TPETRA_TYPES "double")
set(DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<double>")
endif()
if(DEAL_II_HAVE_TPETRA_INST_FLOAT)
--- /dev/null
+New: LinearAlgebra::TpetraWrappers::SparseMatrix class
+that implements a wrapper for Tpetra::CrsMatrix.
+<br>
+(Sebastian Kinnewig, 2023/11/22)
+
{
template <typename Number>
class Vector;
- }
+
+ template <typename Number, typename NodeType>
+ class SparseMatrix;
+ } // namespace TpetraWrappers
# endif
} // namespace LinearAlgebra
#endif
#include <deal.II/lac/trilinos_block_sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_tpetra_sparse_matrix.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <boost/serialization/complex.hpp>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_trilinos_tpetra_sparse_matrix_h
+#define dealii_trilinos_tpetra_sparse_matrix_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+# include <deal.II/base/index_set.h>
+# include <deal.II/base/subscriptor.h>
+# include <deal.II/base/trilinos_utilities.h>
+
+# include <deal.II/lac/trilinos_tpetra_sparsity_pattern.h>
+# include <deal.II/lac/trilinos_tpetra_vector.h>
+
+// Tpetra includes
+# include <Tpetra_Core.hpp>
+# include <Tpetra_CrsMatrix.hpp>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+// forward declarations
+# ifndef DOXYGEN
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ template <typename NodeType>
+ class SparsityPattern;
+ } // namespace TpetraWrappers
+} // namespace LinearAlgebra
+# endif
+
+namespace LinearAlgebra
+{
+
+ namespace TpetraWrappers
+ {
+ /**
+ * This class implements a wrapper to use the Trilinos distributed sparse
+ * matrix class
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/tpetra/doc/html/classTpetra_1_1CrsMatrix.html">Tpetra::CrsMatrix</a>.
+ * This is precisely the kind of matrix we deal with all the time - we
+     * most likely get it from some assembly process, where entries not
+     * locally owned might also need to be written and hence need to be
+     * forwarded to the owner process. This class is designed to be used in a
+     * distributed memory architecture with an MPI compiler underneath, but
+     * it works equally well for serial processes. The only requirement for
+     * this class to work is that Trilinos has been installed with the same
+     * compiler as is used for compiling deal.II.
+ *
+     * Moreover, this class takes an optional template argument, the
+     * Kokkos node type, which allows the use of different Kokkos nodes.
+     * Kokkos allows the writing of portable applications targeting,
+     * for example, CUDA, OpenMP, Serial, or Threads, as backends for
+     * the execution and memory spaces. The backend is selected by
+     * choosing the corresponding Kokkos node type.
+ *
+ * The interface of this class is modeled after the existing SparseMatrix
+     * class in deal.II. It has almost the same member functions and is often
+     * interchangeable. This class is templated and can be used with different
+     * scalar types. However, Trilinos needs to be installed with complex support
+ * for usage with complex scalar types.
+ *
+ * @note You need to call SparseMatrix::compress() before you actually use
+ * the matrix. This calls
+ * <a
+     * href="https://docs.trilinos.org/dev/packages/tpetra/doc/html/classTpetra_1_1CrsMatrix.html#aa985b225a24d2f74602e25b38b4430af">Tpetra::CrsMatrix::fillComplete()</a>
+ * that compresses the storage format for sparse matrices by discarding
+ * unused elements and prepares the matrix for further usage
+ * (e.g., for matrix-vector products).
+ * However, to continue assembling the matrix, you need to call
+ * SparseMatrix::resume_fill() first. Once you finish modifying
+ * the matrix, you must call SparseMatrix::compress() again.
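+     *
+     * A minimal usage sketch (where @p locally_owned_dofs and
+     * @p mpi_communicator are placeholders for whatever the surrounding
+     * program provides) could look like this:
+     * @code
+     * LinearAlgebra::TpetraWrappers::SparseMatrix<double> matrix(
+     *   locally_owned_dofs, mpi_communicator, 5);
+     *
+     * // Write entries; contributions to rows owned by other processes
+     * // are forwarded to the owner during compress().
+     * matrix.add(0, 0, 1.0);
+     *
+     * // Finalize the matrix before using it, e.g., in matrix-vector
+     * // products.
+     * matrix.compress(VectorOperation::add);
+     * @endcode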
+ */
+ template <typename Number,
+ typename NodeType =
+ Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>
+ class SparseMatrix : public Subscriptor
+ {
+ public:
+ /**
+ * Declare the type for container size.
+ */
+ using size_type = dealii::types::global_dof_index;
+
+ /**
+ * Declare an alias for the type used to store matrix elements, in analogy
+ * to all the other container classes.
+ */
+ using value_type = Number;
+
+ /**
+ * Typedef for Tpetra::CrsMatrix
+ */
+      using MatrixType = Tpetra::
+        CrsMatrix<Number, int, dealii::types::signed_global_dof_index, NodeType>;
+
+ /**
+ * Typedef for Tpetra::Map
+ */
+      using MapType =
+        Tpetra::Map<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ /**
+ * Typedef for Tpetra::CrsGraph
+ */
+      using GraphType = Tpetra::
+        CrsGraph<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ /**
+ * @name Constructors and initialization.
+ */
+ /** @{ */
+ /**
+ * Default constructor. Generates an empty (zero-size) matrix.
+ */
+ SparseMatrix();
+
+ /**
+ * Generate a matrix from a TpetraWrappers::SparsityPattern object.
+ */
+ SparseMatrix(const SparsityPattern<NodeType> &sparsity_pattern);
+
+ /**
+ * Move constructor. Create a new sparse matrix by stealing the internal
+ * data of the `other` object.
+ */
+ SparseMatrix(SparseMatrix<Number, NodeType> &&other) noexcept;
+
+ /**
+ * Copy constructor is deleted.
+ */
+ SparseMatrix(const SparseMatrix<Number, NodeType> &) = delete;
+
+ /**
+ * operator= is deleted.
+ */
+ SparseMatrix<Number, NodeType> &
+ operator=(const SparseMatrix<Number, NodeType> &) = delete;
+
+ /**
+ * Move assignment operator.
+ */
+ SparseMatrix<Number, NodeType> &
+ operator=(SparseMatrix<Number, NodeType> &&other) noexcept;
+
+ /**
+ * Destructor. Made virtual so that one can use pointers to objects of
+ * this class.
+ */
+ virtual ~SparseMatrix() override = default;
+
+ /**
+ * This function initializes the Trilinos matrix with a deal.II sparsity
+ * pattern, i.e. it makes the underlying Trilinos Tpetra::CrsMatrix know
+ * the position of nonzero entries according to the sparsity pattern. This
+ * function is meant for use in serial programs, where there is no need to
+ * specify how the matrix is going to be distributed among different
+ * processors. This function works in %parallel, too, but it is
+ * recommended to manually specify the %parallel partitioning of the
+ * matrix using a Tpetra::Map. When run in %parallel, it is currently
+ * necessary that each processor holds the sparsity_pattern structure
+ * because each processor sets its rows.
+ *
+ * This is a collective operation that needs to be called on all
+       * processors in order to avoid a deadlock.
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const SparsityPatternType &sparsity_pattern);
+
+ /**
+ * This function reinitializes the Trilinos sparse matrix from a
+ * (possibly distributed) Trilinos sparsity pattern. It also works
+ * in parallel. In that case, the partitioning of the Trilinos
+ * sparsity pattern is used.
+ *
+ * This is a collective operation that needs to be called on all
+       * processors in order to avoid a deadlock.
+ */
+ void
+ reinit(const SparsityPattern<NodeType> &sparsity_pattern);
+ /** @} */
+
+ /**
+ * @name Constructors and initialization using an IndexSet description
+ */
+ /** @{ */
+ /**
+ * Constructor using an IndexSet and an MPI communicator to describe the
+ * %parallel partitioning. The parameter @p n_max_entries_per_row sets the
+ * number of nonzero entries in each row that will be allocated. Note that
+ * this number does not need to be exact, and it is even allowed that the
+ * actual matrix structure has more nonzero entries than specified in the
+       * constructor. However, it is still advantageous to provide good
+       * estimates here, since this will considerably increase the performance
+       * of the matrix setup. It has, however, no effect on the performance of
+       * matrix-vector products, since Trilinos reorganizes the matrix memory
+       * prior to use (in the compress() step).
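+       *
+       * For example, the index set will often be the set of locally owned
+       * degrees of freedom; in the following sketch, @p dof_handler and
+       * @p mpi_communicator are placeholders:
+       * @code
+       * const IndexSet locally_owned = dof_handler.locally_owned_dofs();
+       * SparseMatrix<double> matrix(locally_owned, mpi_communicator, 10);
+       * @endcode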
+ */
+ SparseMatrix(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * Same as before, but now set the number of non-zero entries in each
+ * matrix row separately. Since we know the number of elements in the
+ * matrix exactly in this case, we can already allocate the right amount
+ * of memory, which makes the creation process including the insertion of
+ * nonzero elements by the respective SparseMatrix::reinit call
+ * considerably faster.
+ */
+ SparseMatrix(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This constructor is similar to the one above, but it now takes two
+ * different IndexSet partitions for row and columns. This interface is
+ * meant to be used for generating rectangular matrices, where the first
+ * index set describes the %parallel partitioning of the degrees of
+ * freedom associated with the matrix rows and the second one the
+ * partitioning of the matrix columns. The second index set specifies the
+ * partitioning of the vectors this matrix is to be multiplied with, not
+ * the distribution of the elements that actually appear in the matrix.
+ *
+ * The parameter @p n_max_entries_per_row defines how much memory will be
+ * allocated for each row. This number does not need to be accurate, as
+ * the structure is reorganized in the compress() call.
+ */
+ SparseMatrix(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_max_entries_per_row = 0);
+
+ /**
+ * Same as before, but now set the number of non-zero entries in each
+ * matrix row separately. Since we know the number of elements in the
+ * matrix exactly in this case, we can already allocate the right amount
+ * of memory, which makes the creation process including the insertion of
+ * nonzero elements by the respective SparseMatrix::reinit call
+ * considerably faster.
+ */
+ SparseMatrix(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This function is initializes the Trilinos Tpetra matrix according to
+ * the specified @p sparsity_pattern, and also reassigns the matrix rows to
+ * different processes according to the user-supplied index set @p parallel_partitioning and
+ * %parallel communicator. In programs following the style of the tutorial
+ * programs, this function (and the respective call for a rectangular
+ * matrix) are the natural way to initialize the matrix size, its
+ * distribution among the MPI processes (if run in %parallel) as well as
+ * the location of non-zero elements. Trilinos stores the sparsity pattern
+ * internally, so it won't be needed any more after this call, in contrast
+ * to the deal.II own object. The optional argument @p exchange_data can
+ * be used for reinitialization with a sparsity pattern that is not fully
+ * constructed. If the flag is not set, each
+ * processor just sets the elements in the sparsity pattern that belong to
+ * its rows.
+ *
+ * This is a collective operation that needs to be called on all
+ * processors in order to avoid a dead lock.
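+       *
+       * A sketch of one possible workflow, building the sparsity pattern
+       * with a DynamicSparsityPattern (again, @p dof_handler and
+       * @p mpi_communicator are placeholders):
+       * @code
+       * const IndexSet locally_owned = dof_handler.locally_owned_dofs();
+       * DynamicSparsityPattern dsp(locally_owned);
+       * DoFTools::make_sparsity_pattern(dof_handler, dsp);
+       *
+       * SparseMatrix<double> matrix;
+       * matrix.reinit(locally_owned, dsp, mpi_communicator);
+       * @endcode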
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const IndexSet ¶llel_partitioning,
+ const SparsityPatternType &sparsity_pattern,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+
+ /**
+ * This function is similar to the other initialization function above,
+ * but now also reassigns the matrix rows and columns according to two
+ * user-supplied index sets. To be used for rectangular matrices. The
+ * optional argument @p exchange_data can be used for reinitialization
+ * with a sparsity pattern that is not fully constructed. This feature is
+ * only implemented for input sparsity patterns of type
+ * DynamicSparsityPattern.
+ *
+ * This is a collective operation that needs to be called on all
+       * processors in order to avoid a deadlock.
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const SparsityPatternType &sparsity_pattern,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+ /** @} */
+
+ /**
+ * @name Information on the matrix
+ */
+ /** @{ */
+ /**
+ * Return the number of rows in this matrix.
+ */
+ dealii::types::signed_global_dof_index
+ m() const;
+
+ /**
+ * Return the number of columns in this matrix.
+ */
+ dealii::types::signed_global_dof_index
+ n() const;
+
+
+ /**
+ * Return the local dimension of the matrix, i.e. the number of rows
+ * stored on the present MPI process. For sequential matrices, this number
+ * is the same as m(), but for %parallel matrices it may be smaller.
+ *
+ * To figure out which elements exactly are stored locally, use
+ * local_range().
+ */
+ unsigned int
+ local_size() const;
+
+ /**
+ * Return a pair of indices indicating which rows of this matrix are
+ * stored locally. The first number is the index of the first row stored,
+ * the second the index of the one past the last one that is stored
+ * locally. If this is a sequential matrix, then the result will be the
+ * pair (0,m()), otherwise it will be a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
+ */
+ std::pair<size_type, size_type>
+ local_range() const;
+
+ /**
+ * Return the total number of nonzero elements of this matrix (summed
+ * over all MPI processes).
+ */
+ size_t
+ n_nonzero_elements() const;
+
+ /**
+ * Return the state of the matrix, i.e., whether compress() needs to be
+ * called after an operation requiring data exchange. A call to compress()
+ * is also needed when the method set() has been called (even when working
+ * in serial).
+ */
+ bool
+ is_compressed() const;
+
+ /**
+ * Return the underlying MPI communicator.
+ */
+ MPI_Comm
+ get_mpi_communicator() const;
+ /** @} */
+
+ /**
+ * @name Modifying entries
+ */
+ /** @{ */
+ /**
+       * This operator assigns a scalar to a matrix. Since this usually does not
+ * make much sense (should we set all matrix entries to this value? Only
+ * the nonzero entries of the sparsity pattern?), this operation is only
+ * allowed if the actual value to be assigned is zero. This operator only
+ * exists to allow for the obvious notation <tt>matrix=0</tt>, which sets
+ * all elements of the matrix to zero, but keeps the sparsity pattern
+ * previously used.
+ */
+ SparseMatrix &
+ operator=(const double d);
+
+ /**
+ * Multiply the entire matrix by a fixed factor.
+ */
+ SparseMatrix &
+ operator*=(const Number factor);
+
+ /**
+ * Divide the entire matrix by a fixed factor.
+ */
+ SparseMatrix &
+ operator/=(const Number factor);
+
+ /**
+ * Add @p value to the element (<i>i,j</i>).
+       * This is just like the respective call in deal.II's own SparseMatrix
+       * class. Moreover, if <tt>value</tt> is not a finite number an exception
+ * is thrown.
+ *
+ * @note When add is called on a compressed matrix, the matrix is set
+ * back to an uncompressed state.
+ */
+ void
+ add(const size_type i, const size_type j, const TrilinosScalar value);
+
+ /**
+ * Add an array of values given by <tt>values</tt> in the given global
+ * matrix row at columns specified by col_indices in the sparse matrix.
+       * This is just like the respective call in deal.II's own SparseMatrix
+       * class. The optional parameter <tt>elide_zero_values</tt> can be used to
+       * specify whether zero values should be added anyway or filtered away,
+       * in which case only non-zero data is added.
+ * The default value is <tt>true</tt>, i.e., zero values won't be added
+ * into the matrix.
+ *
+ * @note When add is called on a compressed matrix, the matrix is set
+ * back to an uncompressed state.
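+       *
+       * A small sketch of adding several entries to one row at once:
+       * @code
+       * const types::global_dof_index cols[3] = {0, 1, 2};
+       * const TrilinosScalar          vals[3] = {1., 0., 2.};
+       *
+       * // With the default elide_zero_values == true, the zero value
+       * // destined for column 1 is filtered out.
+       * matrix.add(0, 3, cols, vals);
+       * @endcode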
+ */
+ void
+ add(const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const TrilinosScalar *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+ /** @} */
+
+ /**
+ * @name Multiplications
+ */
+ /** @{ */
+      /**
+ * Matrix-vector multiplication: let <i>dst = M*src</i> with <i>M</i>
+ * being this matrix.
+ *
+ * Source and destination must not be the same vector.
+ *
+ * The vector @p dst has to be initialized with the same IndexSet that was
+ * used for the row indices of the matrix and the vector @p src has to be
+ * initialized with the same IndexSet that was used for the column indices
+ * of the matrix.
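+       *
+       * A sketch of setting up compatible vectors, where
+       * @p row_partitioning, @p col_partitioning, and @p mpi_communicator
+       * are placeholders for the index sets and communicator the matrix
+       * was built from:
+       * @code
+       * TpetraWrappers::Vector<double> dst(row_partitioning,
+       *                                    mpi_communicator);
+       * TpetraWrappers::Vector<double> src(col_partitioning,
+       *                                    mpi_communicator);
+       * matrix.vmult(dst, src);
+       * @endcode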
+ */
+ void
+ vmult(Vector<Number> &dst, const Vector<Number> &src) const;
+
+      /**
+ * Matrix-vector multiplication: let <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this matrix. This function does the same as vmult() but
+ * takes the transposed matrix.
+ *
+ * Source and destination must not be the same vector.
+ */
+ void
+ Tvmult(Vector<Number> &dst, const Vector<Number> &src) const;
+
+ /**
+       * Adding matrix-vector multiplication. Add <i>M*src</i> to <i>dst</i>
+ * with <i>M</i> being this matrix.
+ *
+ * Source and destination must not be the same vector.
+ */
+ void
+ vmult_add(Vector<Number> &dst, const Vector<Number> &src) const;
+
+
+ /**
+ * Adding matrix-vector multiplication. Add <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being this matrix. This function does the same
+ * as vmult_add() but takes the transposed matrix.
+ *
+ * Source and destination must not be the same vector.
+ */
+ void
+ Tvmult_add(Vector<Number> &dst, const Vector<Number> &src) const;
+ /** @} */
+
+ /**
+ * @name Mixed Stuff
+ */
+ /** @{ */
+ /**
+       * Print the matrix to the given stream, using the format (row,col)
+ * value, i.e. one nonzero entry of the matrix per line. The optional flag
+ * outputs the sparsity pattern in Trilinos style, where the data is
+ * sorted according to the processor number when printed to the stream, as
+ * well as a summary of the matrix like the global size.
+ */
+ void
+ print(std::ostream &out,
+ const bool print_detailed_trilinos_information = false) const;
+
+ /**
+ * This command does two things:
+ * <ul>
+ * <li> If the matrix was initialized without a sparsity pattern, elements
+ * have been added manually using the set() command. When this process is
+ * completed, a call to compress() reorganizes the internal data
+ * structures (sparsity pattern) so that a fast access to data is possible
+ * in matrix-vector products.
+ * <li> If the matrix structure has already been fixed (either by
+ * initialization with a sparsity pattern or by calling compress() during
+ * the setup phase), this command does the %parallel exchange of data.
+ * This is necessary when we perform assembly on more than one (MPI)
+       * process, because then some non-local row data will accumulate on
+       * processes that do not own these rows and has to be communicated
+       * to the owning process. This command is usually called after all
+       * elements have been traversed.
+ * </ul>
+ *
+ * In both cases, this function compresses the data structures and allows
+ * the resulting matrix to be used in all other operations like matrix-
+ * vector products. This is a collective operation, i.e., it needs to be
+ * run on all processors when used in %parallel.
+ *
+ * See
+ * @ref GlossCompress "Compressing distributed objects"
+ * for more information.
+ *
+       * @note The @p operation argument is ignored. It is only present to
+       * ensure compatibility with the interface of other SparseMatrix
+       * classes.
+ */
+ void
+ compress(VectorOperation::values operation);
+
+ /**
+ * This function must be called to allow for changes to the structure
+ * of the matrix again after compress() was called.
+ * Once you are done modifying the matrix structure, you must call
+ * compress() again.
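+       *
+       * A sketch of the resulting modify/compress cycle (@p i, @p j, and
+       * @p value are placeholders):
+       * @code
+       * matrix.compress(VectorOperation::add); // finalize the matrix
+       * matrix.resume_fill();                  // reopen it for changes
+       * matrix.add(i, j, value);               // modify entries
+       * matrix.compress(VectorOperation::add); // finalize it again
+       * @endcode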
+ */
+ void
+ resume_fill();
+
+ /**
+ * Return a const reference to the underlying Trilinos
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/tpetra/doc/html/classTpetra_1_1CrsMatrix.html">Tpetra::CrsMatrix</a>
+ * class.
+ */
+ const MatrixType &
+ trilinos_matrix() const;
+
+ /**
+ * Return a (modifiable) reference to the underlying Trilinos
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/tpetra/doc/html/classTpetra_1_1CrsMatrix.html">Tpetra::CrsMatrix</a>
+ * class.
+ */
+ MatrixType &
+ trilinos_matrix();
+
+ /**
+ * Return a const
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/teuchos/doc/html/classTeuchos_1_1RCP.html">Teuchos::RCP</a>
+ * to the underlying Trilinos
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/tpetra/doc/html/classTpetra_1_1CrsMatrix.html">Tpetra::CrsMatrix</a>
+ * class.
+ */
+ Teuchos::RCP<const MatrixType>
+ trilinos_rcp() const;
+
+ /**
+ * Return a (modifiable)
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/teuchos/doc/html/classTeuchos_1_1RCP.html">Teuchos::RCP</a>
+ * to the underlying Trilinos
+ * <a
+ * href="https://docs.trilinos.org/dev/packages/tpetra/doc/html/classTpetra_1_1CrsMatrix.html">Tpetra::CrsMatrix</a>
+ * class.
+ */
+ Teuchos::RCP<MatrixType>
+ trilinos_rcp();
+ /** @} */
+
+ /**
+ * @addtogroup Exceptions
+ */
+ /** @{ */
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcMatrixNotCompressed);
+
+ /**
+ * Exception
+ */
+ DeclExceptionMsg(
+ ExcSourceEqualsDestination,
+ "You are attempting an operation on two vectors that "
+ "are the same object, but the operation requires that the "
+ "two objects are in fact different.");
+
+      /**
+ * Exception
+ */
+ DeclExceptionMsg(ExcColMapMissmatch,
+ "The column partitioning of a matrix does not match "
+ "the partitioning of a vector you are trying to "
+ "multiply it with. Are you multiplying the "
+ "matrix with a vector that has ghost elements?");
+
+      /**
+ * Exception
+ */
+ DeclExceptionMsg(ExcDomainMapMissmatch,
+ "The row partitioning of a matrix does not match "
+ "the partitioning of a vector you are trying to "
+ "put the result of a matrix-vector product in. "
+ "Are you trying to put the product of the "
+ "matrix with a vector into a vector that has "
+ "ghost elements?");
+ /** @} */
+
+ private:
+ /**
+       * Pointer to the user-supplied Tpetra map of the matrix
+ * columns that assigns parts of the matrix to the individual processes.
+ *
+ * @note The Trilinos matrix is row-oriented, and the row_space_map is
+ * therefore stored in the Trilinos matrix itself. The additional
+ * information from the column space map is used to speed up the
+ * assembly process.
+ */
+ Teuchos::RCP<MapType> column_space_map;
+
+ /**
+ * A sparse matrix object in Trilinos to be used for finite element based
+ * problems which allows for assembling into non-local elements. The
+       * actual type, a Tpetra::CrsMatrix, is set in the constructor.
+ */
+ Teuchos::RCP<MatrixType> matrix;
+
+ /**
+ * A boolean variable to hold information on whether the matrix is
+       * fill complete or still open for modification (fill active).
+ */
+ bool compressed;
+
+ }; // class SparseMatrix
+
+
+ /* ------------------------- Inline functions ---------------------- */
+
+ template <typename Number, typename NodeType>
+ inline void
+ SparseMatrix<Number, NodeType>::add(const size_type i,
+ const size_type j,
+ const TrilinosScalar value)
+ {
+ add(i, 1, &j, &value, false);
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline dealii::types::signed_global_dof_index
+ SparseMatrix<Number, NodeType>::m() const
+ {
+ return matrix->getRowMap()->getGlobalNumElements();
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline dealii::types::signed_global_dof_index
+ SparseMatrix<Number, NodeType>::n() const
+ {
+ // If the matrix structure has not been fixed (i.e., we did not have a
+ // sparsity pattern), it does not know about the number of columns, so we
+ // must always take this from the additional column space map
+ Assert(column_space_map.get() != nullptr, ExcInternalError());
+ return column_space_map->getGlobalNumElements();
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline bool
+ SparseMatrix<Number, NodeType>::is_compressed() const
+ {
+ return compressed;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+    inline const Tpetra::
+      CrsMatrix<Number, int, types::signed_global_dof_index, NodeType> &
+ SparseMatrix<Number, NodeType>::trilinos_matrix() const
+ {
+ return *matrix;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+    inline Tpetra::
+      CrsMatrix<Number, int, types::signed_global_dof_index, NodeType> &
+ SparseMatrix<Number, NodeType>::trilinos_matrix()
+ {
+ return *matrix;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline Teuchos::RCP<
+      const Tpetra::
+        CrsMatrix<Number, int, types::signed_global_dof_index, NodeType>>
+ SparseMatrix<Number, NodeType>::trilinos_rcp() const
+ {
+ return matrix.getConst();
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline Teuchos::RCP<
+      Tpetra::
+        CrsMatrix<Number, int, types::signed_global_dof_index, NodeType>>
+ SparseMatrix<Number, NodeType>::trilinos_rcp()
+ {
+ return matrix;
+ }
+
+ } // namespace TpetraWrappers
+
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_TRILINOS_WITH_TPETRA
+
+#endif // dealii_trilinos_tpetra_sparse_matrix_h
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_trilinos_tpetra_sparse_matrix_templates_h
+#define dealii_trilinos_tpetra_sparse_matrix_templates_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+# include <deal.II/lac/dynamic_sparsity_pattern.h>
+# include <deal.II/lac/trilinos_tpetra_sparse_matrix.h>
+# include <deal.II/lac/trilinos_tpetra_sparsity_pattern.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+
+ namespace TpetraWrappers
+ {
+ // reinit_matrix():
+ namespace
+ {
+ using size_type = dealii::types::signed_global_dof_index;
+
+ template <typename NodeType>
+ using MapType =
+ Tpetra::Map<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ template <typename Number, typename NodeType>
+ using MatrixType =
+ Tpetra::CrsMatrix<Number,
+ int,
+ dealii::types::signed_global_dof_index,
+ NodeType>;
+
+ template <typename NodeType>
+ using GraphType =
+ Tpetra::CrsGraph<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ template <typename Number,
+ typename NodeType,
+ typename SparsityPatternType>
+ void
+ reinit_matrix(const IndexSet &row_parallel_partitioning,
+ const IndexSet &column_parallel_partitioning,
+ const SparsityPatternType &sparsity_pattern,
+ const bool exchange_data,
+ const MPI_Comm communicator,
+ Teuchos::RCP<MapType<NodeType>> &column_space_map,
+ Teuchos::RCP<MatrixType<Number, NodeType>> &matrix)
+ {
+ // release memory before reallocation
+ matrix.reset();
+
+ // Get the Tpetra::Maps
+ Teuchos::RCP<MapType<NodeType>> row_space_map =
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+
+ column_space_map =
+ column_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+
+ if (column_space_map->getComm()->getRank() == 0)
+ {
+ AssertDimension(sparsity_pattern.n_rows(),
+ row_parallel_partitioning.size());
+ AssertDimension(sparsity_pattern.n_cols(),
+ column_parallel_partitioning.size());
+ }
+
+ // if we want to exchange data, build a usual Trilinos sparsity pattern
+          // and let that handle the exchange. Otherwise, manually create a
+          // CrsGraph, which consumes considerably less memory because it can
+          // set the correct number of indices right from the start.
+ if (exchange_data)
+ {
+            SparsityPattern<NodeType> trilinos_sparsity;
+ trilinos_sparsity.reinit(row_parallel_partitioning,
+ column_parallel_partitioning,
+ sparsity_pattern,
+ communicator,
+ exchange_data);
+ matrix = Utilities::Trilinos::internal::make_rcp<
+ MatrixType<Number, NodeType>>(
+ trilinos_sparsity.trilinos_sparsity_pattern());
+
+ return;
+ }
+
+ IndexSet relevant_rows(sparsity_pattern.row_index_set());
+ // serial case
+ if (relevant_rows.size() == 0)
+ {
+ relevant_rows.set_size(row_space_map->getGlobalNumElements());
+ relevant_rows.add_range(0, row_space_map->getGlobalNumElements());
+ }
+ relevant_rows.compress();
+
+
+ std::vector<TrilinosWrappers::types::int_type> ghost_rows;
+ Teuchos::Array<size_t> n_entries_per_row(
+ row_space_map->getLocalNumElements());
+ {
+ size_type own = 0;
+ for (const auto global_row : relevant_rows)
+ {
+ if (row_space_map->isNodeGlobalElement(global_row))
+ n_entries_per_row[own++] =
+ sparsity_pattern.row_length(global_row);
+ }
+ }
+
+ // The deal.II notation of a Sparsity pattern corresponds to the Tpetra
+ // concept of a Graph. Hence, we generate a graph by copying the
+ // sparsity pattern into it, and then build up the matrix from the
+          // graph. This is considerably faster than directly filling elements
+ // into the matrix. Moreover, it consumes less memory, since the
+ // internal reordering is done on ints only, and we can leave the
+ // doubles aside.
+ Teuchos::RCP<GraphType<NodeType>> graph;
+
+ graph = Utilities::Trilinos::internal::make_rcp<GraphType<NodeType>>(
+ row_space_map, n_entries_per_row());
+
+          // This function assumes that the sparsity pattern sits on all
+ // processors (completely). The parallel version uses a Tpetra graph
+ // that is already distributed.
+
+ // now insert the indices
+ std::vector<TrilinosWrappers::types::int_type> row_indices;
+
+ for (const auto global_row : relevant_rows)
+ {
+ const int row_length = sparsity_pattern.row_length(global_row);
+ if (row_length == 0)
+ continue;
+
+ row_indices.resize(row_length, -1);
+ for (size_type col = 0; col < row_length; ++col)
+ row_indices[col] =
+ sparsity_pattern.column_number(global_row, col);
+
+ AssertIndexRange(global_row, row_space_map->getGlobalNumElements());
+ graph->insertGlobalIndices(global_row,
+ row_length,
+ row_indices.data());
+ }
+
+          // Finally, optimize the graph structure (sort indices, make memory
+          // contiguous, etc.). Note that the documentation of the function indeed
+ // states that we first need to provide the column (domain) map and then
+ // the row (range) map
+ graph->fillComplete(column_space_map, row_space_map);
+
+ // check whether we got the number of columns right.
+ AssertDimension(sparsity_pattern.n_cols(), graph->getGlobalNumCols());
+
+ // And now finally generate the matrix.
+ matrix =
+ Utilities::Trilinos::internal::make_rcp<MatrixType<Number, NodeType>>(
+ graph);
+ }
+ } // namespace
+
+
+
+ // Constructors and initialization:
+
+ // The constructor is actually the only point where we have to check
+ // whether we build a serial or a parallel Trilinos matrix.
+ // Actually, it does not even matter how many threads there are, but
+    // only whether we use an MPI compiler or a standard compiler. So, even one
+ // thread on a configuration with MPI will still get a parallel interface.
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix()
+ : column_space_map(Utilities::Trilinos::internal::make_rcp<MapType>(
+ 0,
+ 0,
+ Utilities::Trilinos::tpetra_comm_self()))
+ {
+ // Prepare the graph
+ Teuchos::RCP<GraphType> graph =
+ Utilities::Trilinos::internal::make_rcp<GraphType>(column_space_map,
+ column_space_map,
+ 0);
+ graph->fillComplete();
+
+ // Create the matrix from the graph
+ matrix = Utilities::Trilinos::internal::make_rcp<MatrixType>(graph);
+
+ compressed = false;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix(
+ const SparsityPattern<NodeType> &sparsity_pattern)
+ : matrix(Utilities::Trilinos::internal::make_rcp<MatrixType>(
+ sparsity_pattern.trilinos_sparsity_pattern()))
+ {
+ column_space_map =
+ Teuchos::rcp_const_cast<MapType>(sparsity_pattern.domain_partitioner());
+ compressed = false;
+ compress(VectorOperation::add);
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix(
+ SparseMatrix<Number, NodeType> &&other) noexcept
+ : column_space_map(std::move(other.column_space_map))
+ , matrix(std::move(other.matrix))
+ , compressed(std::move(other.compressed))
+ {
+ other.compressed = false;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType> &
+ SparseMatrix<Number, NodeType>::operator=(
+ SparseMatrix<Number, NodeType> &&other) noexcept
+ {
+ column_space_map = std::move(other.column_space_map);
+ matrix = std::move(other.matrix);
+ compressed = std::move(other.compressed);
+
+ return *this;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ template <typename SparsityPatternType>
+ void
+ SparseMatrix<Number, NodeType>::reinit(
+ const SparsityPatternType &sparsity_pattern)
+ {
+ reinit_matrix<Number, NodeType, SparsityPatternType>(
+ complete_index_set(sparsity_pattern.n_rows()),
+ complete_index_set(sparsity_pattern.n_cols()),
+ sparsity_pattern,
+ false,
+ MPI_COMM_SELF,
+ column_space_map,
+ matrix);
+
+ compressed = false;
+ compress(VectorOperation::add);
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::reinit(
+ const SparsityPattern<NodeType> &sparsity_pattern)
+ {
+ column_space_map.reset();
+ matrix.reset();
+
+ // reinit with a (distributed) Trilinos sparsity pattern.
+ column_space_map =
+ Teuchos::rcp_const_cast<MapType>(sparsity_pattern.domain_partitioner());
+ matrix = Utilities::Trilinos::internal::make_rcp<MatrixType>(
+ sparsity_pattern.trilinos_sparsity_pattern());
+
+ compressed = false;
+ compress(VectorOperation::add);
+ }
+
+
+
+ // Constructors and initialization using an IndexSet description:
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix(
+ const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const unsigned int n_max_entries_per_row)
+ : column_space_map(
+ parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ , matrix(Utilities::Trilinos::internal::make_rcp<MatrixType>(
+ column_space_map,
+ n_max_entries_per_row))
+ , compressed(false)
+ {}
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix(
+ const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<unsigned int> &n_entries_per_row)
+ : column_space_map(
+ parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ , compressed(false)
+ {
+ Teuchos::Array<size_t> n_entries_per_row_array(n_entries_per_row.begin(),
+ n_entries_per_row.end());
+ matrix = Utilities::Trilinos::internal::make_rcp<MatrixType>(
+ column_space_map, n_entries_per_row_array());
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const size_type n_max_entries_per_row)
+ : column_space_map(
+ col_parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ , matrix(Utilities::Trilinos::internal::make_rcp<MatrixType>(
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false),
+ n_max_entries_per_row))
+ , compressed(false)
+ {}
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType>::SparseMatrix(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<unsigned int> &n_entries_per_row)
+ : column_space_map(
+ col_parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ , compressed(false)
+ {
+ Teuchos::Array<size_t> n_entries_per_row_array(n_entries_per_row.begin(),
+ n_entries_per_row.end());
+ matrix = Utilities::Trilinos::internal::make_rcp<MatrixType>(
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false),
+ n_entries_per_row_array());
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ template <typename SparsityPatternType>
+ inline void
+ SparseMatrix<Number, NodeType>::reinit(
+ const IndexSet ¶llel_partitioning,
+ const SparsityPatternType &sparsity_pattern,
+ const MPI_Comm communicator,
+ const bool exchange_data)
+ {
+ reinit(parallel_partitioning,
+ parallel_partitioning,
+ sparsity_pattern,
+ communicator,
+ exchange_data);
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ template <typename SparsityPatternType>
+ void
+ SparseMatrix<Number, NodeType>::reinit(
+      const IndexSet &row_parallel_partitioning,
+      const IndexSet &col_parallel_partitioning,
+ const SparsityPatternType &sparsity_pattern,
+ const MPI_Comm communicator,
+ const bool exchange_data)
+ {
+ reinit_matrix<Number, NodeType, SparsityPatternType>(
+ row_parallel_partitioning,
+ col_parallel_partitioning,
+ sparsity_pattern,
+ exchange_data,
+ communicator,
+ column_space_map,
+ matrix);
+
+ compressed = false;
+ compress(VectorOperation::add);
+ }
+
+
+
+ // Information on the matrix
+
+ template <typename Number, typename NodeType>
+ inline unsigned int
+ SparseMatrix<Number, NodeType>::local_size() const
+ {
+ return matrix->getLocalNumRows();
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline std::pair<typename SparseMatrix<Number, NodeType>::size_type,
+ typename SparseMatrix<Number, NodeType>::size_type>
+ SparseMatrix<Number, NodeType>::local_range() const
+ {
+ size_type begin, end;
+      begin = matrix->getRowMap()->getMinGlobalIndex();
+      end   = matrix->getRowMap()->getMaxGlobalIndex() + 1;
+
+ return std::make_pair(begin, end);
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ inline size_t
+ SparseMatrix<Number, NodeType>::n_nonzero_elements() const
+ {
+ return matrix->getGlobalNumEntries();
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ MPI_Comm
+ SparseMatrix<Number, NodeType>::get_mpi_communicator() const
+ {
+ return Utilities::Trilinos::teuchos_comm_to_mpi_comm(matrix->getComm());
+ }
+
+
+
+ // Modifying entries
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType> &
+ SparseMatrix<Number, NodeType>::operator=(const double d)
+ {
+ (void)d;
+ Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
+
+ if (compressed)
+ {
+ matrix->resumeFill();
+ compressed = false;
+ }
+
+ // As checked above, we are only allowed to use d==0.0, so pass
+ // a constant zero (instead of a run-time value 'd' that *happens* to
+ // have a zero value) to the underlying class in hopes that the compiler
+ // can optimize this somehow.
+ matrix->setAllToScalar(/*d=*/0.0);
+
+ return *this;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType> &
+ SparseMatrix<Number, NodeType>::operator*=(const Number a)
+ {
+ matrix->scale(a);
+ return *this;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ SparseMatrix<Number, NodeType> &
+ SparseMatrix<Number, NodeType>::operator/=(const Number a)
+ {
+ Assert(a != 0, ExcDivideByZero());
+
+ const Number factor = 1.0 / a;
+ matrix->scale(factor);
+ return *this;
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::add(const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
+ const TrilinosScalar *values,
+ const bool elide_zero_values,
+ const bool /*col_indices_are_sorted*/)
+ {
+ AssertIndexRange(row, this->m());
+
+ // If the matrix is marked as compressed, we need to
+ // call resumeFill() first.
+ if (compressed || matrix->isFillComplete())
+ {
+ matrix->resumeFill();
+ compressed = false;
+ }
+
+      // Count the number of zero entries.
+ const size_t n_zero_entries =
+ (elide_zero_values ? std::count(values, values + n_cols, Number(0)) :
+ 0);
+
+ // Exit early if there is nothing to do
+ if (n_zero_entries == n_cols)
+ return;
+
+ // Convert the input into Teuchos::Array
+ Teuchos::Array<types::signed_global_dof_index> col_indices_array(
+ n_cols - n_zero_entries);
+ Teuchos::Array<Number> values_array(n_cols - n_zero_entries);
+ if (elide_zero_values)
+ {
+ size_t n_columns = 0;
+ for (size_t i = 0; i < n_cols; ++i)
+ {
+              // Skip all zero entries while filling the index and value arrays.
+ if (values[i] != 0)
+ {
+ AssertIsFinite(values[i]);
+ AssertIndexRange(col_indices[i], n());
+                  AssertIndexRange(n_columns, n_cols - n_zero_entries);
+ col_indices_array[n_columns] = col_indices[i];
+ values_array[n_columns] = values[i];
+ ++n_columns;
+ }
+ }
+ }
+ else
+ for (size_t i = 0; i < n_cols; ++i)
+ {
+ AssertIsFinite(values[i]);
+ AssertIndexRange(col_indices[i], n());
+ col_indices_array[i] = col_indices[i];
+ values_array[i] = values[i];
+ }
+
+ // Sum the values into the global matrix.
+ matrix->sumIntoGlobalValues(row, col_indices_array, values_array);
+ }
+
+
+
+ // Multiplications
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::vmult(Vector<Number> &dst,
+ const Vector<Number> &src) const
+ {
+ Assert(&src != &dst, ExcSourceEqualsDestination());
+ Assert(matrix->isFillComplete(), ExcMatrixNotCompressed());
+ Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()),
+ ExcColMapMissmatch());
+ Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()),
+ ExcDomainMapMissmatch());
+ matrix->apply(*src.trilinos_rcp(), *dst.trilinos_rcp());
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::Tvmult(Vector<Number> &dst,
+ const Vector<Number> &src) const
+ {
+ Assert(&src != &dst, ExcSourceEqualsDestination());
+ Assert(matrix->isFillComplete(), ExcMatrixNotCompressed());
+ Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()),
+ ExcColMapMissmatch());
+ Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()),
+ ExcDomainMapMissmatch());
+ matrix->apply(*src.trilinos_rcp(), *dst.trilinos_rcp(), Teuchos::TRANS);
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::vmult_add(Vector<Number> &dst,
+ const Vector<Number> &src) const
+ {
+ Assert(&src != &dst, ExcSourceEqualsDestination());
+ Assert(matrix->isFillComplete(), ExcMatrixNotCompressed());
+ Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()),
+ ExcColMapMissmatch());
+ Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()),
+ ExcDomainMapMissmatch());
+ matrix->apply(*src.trilinos_rcp(),
+ *dst.trilinos_rcp(),
+ Teuchos::NO_TRANS,
+ Teuchos::ScalarTraits<Number>::one(),
+ Teuchos::ScalarTraits<Number>::one());
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::Tvmult_add(Vector<Number> &dst,
+ const Vector<Number> &src) const
+ {
+ Assert(&src != &dst, ExcSourceEqualsDestination());
+ Assert(matrix->isFillComplete(), ExcMatrixNotCompressed());
+ Assert(dst.trilinos_rcp()->getMap()->isSameAs(*matrix->getDomainMap()),
+ ExcColMapMissmatch());
+ Assert(src.trilinos_rcp()->getMap()->isSameAs(*matrix->getRangeMap()),
+ ExcDomainMapMissmatch());
+ matrix->apply(*src.trilinos_rcp(),
+ *dst.trilinos_rcp(),
+ Teuchos::TRANS,
+ Teuchos::ScalarTraits<Number>::one(),
+ Teuchos::ScalarTraits<Number>::one());
+ }
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::print(
+ std::ostream &out,
+ const bool print_detailed_trilinos_information) const
+ {
+ if (print_detailed_trilinos_information)
+ {
+ auto teuchos_out = Teuchos::getFancyOStream(Teuchos::rcpFromRef(out));
+ matrix->describe(*teuchos_out, Teuchos::VERB_EXTREME);
+ }
+ else
+ {
+ typename MatrixType::values_host_view_type values;
+ typename MatrixType::local_inds_host_view_type indices;
+
+ for (size_t i = 0; i < matrix->getLocalNumRows(); ++i)
+ {
+ matrix->getLocalRowView(i, indices, values);
+
+ for (size_t j = 0; j < indices.size(); ++j)
+ out << "(" << matrix->getRowMap()->getGlobalElement(i) << ","
+ << matrix->getColMap()->getGlobalElement(indices[j]) << ") "
+ << values[j] << std::endl;
+ }
+ }
+
+ AssertThrow(out.fail() == false, ExcIO());
+ }
+
+
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::compress(
+ [[maybe_unused]] VectorOperation::values operation)
+ {
+ if (!compressed)
+ {
+ matrix->fillComplete(column_space_map, matrix->getRowMap());
+ compressed = true;
+ }
+ }
+
+ template <typename Number, typename NodeType>
+ void
+ SparseMatrix<Number, NodeType>::resume_fill()
+ {
+ if (compressed)
+ {
+ matrix->resumeFill();
+ compressed = false;
+ }
+ }
+
+ } // namespace TpetraWrappers
+
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_TRILINOS_WITH_TPETRA
+
+#endif // dealii_trilinos_tpetra_sparse_matrix_templates_h
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_trilinos_tpetra_sparsity_pattern_h
+#define dealii_trilinos_tpetra_sparsity_pattern_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+# include <deal.II/base/index_set.h>
+# include <deal.II/base/mpi_stub.h>
+# include <deal.II/base/subscriptor.h>
+
+# include <deal.II/lac/exceptions.h>
+# include <deal.II/lac/sparsity_pattern_base.h>
+# include <deal.II/lac/trilinos_tpetra_sparse_matrix.h>
+
+# include <Tpetra_CrsGraph.hpp>
+
+# include <cmath>
+# include <memory>
+# include <vector>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+// forward declarations
+# ifndef DOXYGEN
+class DynamicSparsityPattern;
+
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ template <typename NodeType>
+ class SparsityPattern;
+
+ template <typename Number, typename NodeType>
+ class SparseMatrix;
+
+ namespace SparsityPatternIterators
+ {
+ template <typename NodeType>
+ class Iterator;
+ }
+ } // namespace TpetraWrappers
+} // namespace LinearAlgebra
+# endif
+
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ namespace SparsityPatternIterators
+ {
+ /**
+ * Accessor class for iterators into sparsity patterns. This class is also
+ * the base class for both const and non-const accessor classes into
+ * sparse matrices.
+ *
+ * Note that this class only allows read access to elements, providing
+ * their row and column number. It does not allow modifying the sparsity
+ * pattern itself.
+ *
+ * @ingroup TrilinosWrappers
+ */
+ template <typename NodeType =
+ Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>
+ class Accessor
+ {
+ public:
+ /**
+ * Declare type for container size.
+ */
+ using size_type = dealii::types::signed_global_dof_index;
+
+ /**
+ * Constructor.
+ */
+ Accessor(const SparsityPattern<NodeType> *sparsity_pattern,
+ const size_type row,
+ const size_type index);
+
+ /**
+ * Row number of the element represented by this object.
+ */
+ size_type
+ row() const;
+
+ /**
+ * Index in row of the element represented by this object.
+ */
+ size_type
+ index() const;
+
+ /**
+ * Column number of the element represented by this object.
+ */
+ size_type
+ column() const;
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcBeyondEndOfSparsityPattern);
+
+ /**
+ * Exception
+ */
+ DeclException3(ExcAccessToNonlocalRow,
+ size_type,
+ size_type,
+ size_type,
+ << "You tried to access row " << arg1
+ << " of a distributed sparsity pattern, "
+                     << "but only rows " << arg2 << " through " << arg3
+ << " are stored locally and can be accessed.");
+
+ private:
+ /**
+         * The sparsity pattern accessed.
+ */
+ SparsityPattern<NodeType> *sparsity_pattern;
+
+ /**
+ * Current row number.
+ */
+ size_type a_row;
+
+ /**
+ * Current index in row.
+ */
+ size_type a_index;
+
+ /**
+ * Cache where we store the column indices of the present row. This is
+ * necessary, since Trilinos makes access to the elements of its
+ * matrices rather hard, and it is much more efficient to copy all
+ * column entries of a row once when we enter it than repeatedly asking
+ * Trilinos for individual ones. This also makes some sense since it is
+ * likely that we will access them sequentially anyway.
+ *
+ * In order to make copying of iterators/accessor of acceptable
+ * performance, we keep a shared pointer to these entries so that more
+ * than one accessor can access this data if necessary.
+ */
+ std::shared_ptr<std::vector<dealii::types::signed_global_dof_index>>
+ colnum_cache;
+
+ /**
+ * Discard the old row caches (they may still be used by other
+ * accessors) and generate new ones for the row pointed to presently by
+ * this accessor.
+ */
+ void
+ visit_present_row();
+
+        // Make the Iterator class a friend.
+ friend class Iterator<NodeType>;
+ };
+
+ /**
+ * Iterator class for sparsity patterns of type
+       * TpetraWrappers::SparsityPattern. Access to individual elements of the
+ * sparsity pattern is handled by the Accessor class in this namespace.
+ */
+ template <typename NodeType =
+ Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>
+ class Iterator
+ {
+ public:
+ /**
+ * Declare type for container size.
+ */
+ using size_type = size_t;
+
+ /**
+ * Constructor. Create an iterator into the matrix @p matrix for the
+ * given row and the index within it.
+ */
+ Iterator(const SparsityPattern<NodeType> *sparsity_pattern,
+ const size_type row,
+ const size_type index);
+
+ /**
+ * Copy constructor.
+ */
+ Iterator(const Iterator<NodeType> &i);
+
+ /**
+ * Prefix increment.
+ */
+ Iterator<NodeType> &
+ operator++();
+
+ /**
+ * Postfix increment.
+ */
+ Iterator
+ operator++(int);
+
+ /**
+ * Dereferencing operator.
+ */
+ const Accessor<NodeType> &
+ operator*() const;
+
+ /**
+ * Dereferencing operator.
+ */
+ const Accessor<NodeType> *
+ operator->() const;
+
+ /**
+ * Comparison. True, if both iterators point to the same matrix
+ * position.
+ */
+ bool
+ operator==(const Iterator<NodeType> &) const;
+
+ /**
+ * Inverse of <tt>==</tt>.
+ */
+ bool
+ operator!=(const Iterator<NodeType> &) const;
+
+ /**
+ * Comparison operator. Result is true if either the first row number is
+ * smaller or if the row numbers are equal and the first index is
+ * smaller.
+ */
+ bool
+ operator<(const Iterator<NodeType> &) const;
+
+ /**
+ * Exception
+ */
+ DeclException2(ExcInvalidIndexWithinRow,
+ size_type,
+ size_type,
+ << "Attempt to access element " << arg2 << " of row "
+ << arg1 << " which doesn't have that many elements.");
+
+ private:
+ /**
+ * Store an object of the accessor class.
+ */
+ Accessor<NodeType> accessor;
+
+ friend class TpetraWrappers::SparsityPattern<NodeType>;
+ };
+
+ } // namespace SparsityPatternIterators
+
+
+ /**
+     * This class implements a wrapper to use the Trilinos distributed
+ * sparsity pattern class Tpetra::CrsGraph. This class is designed to be
+ * used for construction of %parallel Trilinos matrices. The functionality
+ * of this class is modeled after the existing sparsity pattern classes,
+ * with the difference that this class can work fully in %parallel according
+ * to a partitioning of the sparsity pattern rows.
+ *
+ * This class has many similarities to the DynamicSparsityPattern, since it
+ * can dynamically add elements to the pattern without any memory being
+ * previously reserved for it. However, it also has a method
+     * SparsityPattern<NodeType>::compress() that finalizes the pattern and
+ * enables its use with Trilinos sparse matrices.
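+     *
+     * A minimal sketch of the intended workflow (@p locally_owned_dofs and
+     * @p mpi_communicator are placeholders):
+     * @code
+     * LinearAlgebra::TpetraWrappers::SparsityPattern<> pattern(
+     *   locally_owned_dofs, mpi_communicator);
+     * // ... add entries, e.g., through DoFTools::make_sparsity_pattern() ...
+     * pattern.compress();
+     * @endcode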
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Sparsity
+ */
+ template <typename NodeType =
+ Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>
+ class SparsityPattern : public SparsityPatternBase
+ {
+ public:
+ /**
+ * Declare type for container size.
+ */
+ using size_type = dealii::types::signed_global_dof_index;
+
+ /**
+ * Declare an alias for the iterator class.
+ */
+ using const_iterator = SparsityPatternIterators::Iterator<NodeType>;
+
+ /**
+ * Typedef for Tpetra::Map
+ */
+ using MapType =
+ Tpetra::Map<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ /**
+       * Typedef for Tpetra::CrsGraph
+ */
+ using GraphType =
+ Tpetra::CrsGraph<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ /**
+ * @name Basic constructors and initialization
+ */
+ /** @{ */
+ /**
+ * Default constructor. Generates an empty (zero-size) sparsity pattern.
+ */
+ SparsityPattern();
+
+ /**
+ * Generate a sparsity pattern that is completely stored locally, having
+ * $m$ rows and $n$ columns. The resulting matrix will be completely
+ * stored locally, too.
+ *
+       * It is possible to specify the number of column entries per row using
+ * the optional @p n_entries_per_row argument. However, this value does
+ * not need to be accurate or even given at all, since one does usually
+ * not have this kind of information before building the sparsity pattern
+ * (the usual case when the function DoFTools::make_sparsity_pattern() is
+ * called). The entries are allocated dynamically in a similar manner as
+ * for the deal.II DynamicSparsityPattern classes. However, a good
+ * estimate will reduce the setup time of the sparsity pattern.
+ */
+ SparsityPattern(const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * Generate a sparsity pattern that is completely stored locally, having
+ * $m$ rows and $n$ columns. The resulting matrix will be completely
+ * stored locally, too.
+ *
+ * The vector <tt>n_entries_per_row</tt> specifies the number of entries
+       * in each row (information that is usually not available, though).
+ */
+ SparsityPattern(const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row);
+
+ /**
+       * Move constructor. Create a new sparsity pattern by stealing the
+       * internal data of the moved-from object.
+ */
+ SparsityPattern(SparsityPattern<NodeType> &&other) noexcept;
+
+ /**
+ * Copy constructor. Sets the calling sparsity pattern to be the same as
+ * the input sparsity pattern.
+ */
+ SparsityPattern(const SparsityPattern<NodeType> &input_sparsity_pattern);
+
+ /**
+ * Destructor. Made virtual so that one can use pointers to this class.
+ */
+ virtual ~SparsityPattern() override = default;
+
+ /**
+ * Initialize a sparsity pattern that is completely stored locally, having
+ * $m$ rows and $n$ columns. The resulting matrix will be completely
+ * stored locally.
+ *
+       * The number of column entries per row is specified as the maximum
+ * number of entries argument. This does not need to be an accurate
+ * number since the entries are allocated dynamically in a similar manner
+ * as for the deal.II DynamicSparsityPattern classes, but a good estimate
+ * will reduce the setup time of the sparsity pattern.
+ */
+ void
+ reinit(const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * Initialize a sparsity pattern that is completely stored locally, having
+ * $m$ rows and $n$ columns. The resulting matrix will be completely
+ * stored locally.
+ *
+ * The vector <tt>n_entries_per_row</tt> specifies the number of entries
+ * in each row.
+ */
+ void
+ reinit(const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row);
+
+ /**
+ * Copy function. Sets the calling sparsity pattern to be the same as the
+ * input sparsity pattern.
+ */
+ void
+ copy_from(const SparsityPattern<NodeType> &input_sparsity_pattern);
+
+ /**
+ * Copy function from one of the deal.II sparsity patterns. If used in
+ * parallel, this function uses an ad-hoc partitioning of the rows and
+ * columns.
+ */
+ template <typename SparsityPatternType>
+ void
+ copy_from(const SparsityPatternType &nontrilinos_sparsity_pattern);
+
+ /**
+ * Copy operator. This operation is only allowed for empty objects, to
+ * avoid potentially very costly operations automatically synthesized by
+ * the compiler. Use copy_from() instead if you know that you really want
+ * to copy a sparsity pattern with non-trivial content.
+ */
+ SparsityPattern<NodeType> &
+ operator=(const SparsityPattern<NodeType> &input_sparsity_pattern);
+
+ /**
+ * Release all memory and return to a state just like after having called
+ * the default constructor.
+ *
+ * This is a collective operation that needs to be called on all
+ * processors in order to avoid a deadlock.
+ */
+ void
+ clear();
+
+ /**
+ * In analogy to our own SparsityPattern class, this function compresses
+ * the sparsity pattern and allows the resulting pattern to be used for
+ * actually generating a (Trilinos-based) matrix. This function also
+ * exchanges non-local data that might have accumulated during the
+ * addition of new elements. This function must therefore be called once
+ * the structure is fixed. This is a collective operation, i.e., it needs
+ * to be run on all processors when used in parallel.
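+ *
+ * A typical sequence therefore looks as follows (a sketch; the entries
+ * added here are placeholders):
+ * @code
+ * sp.add(0, 0);
+ * sp.add(0, 1);
+ * sp.compress(); // collective: run on all participating processes
+ * @endcode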
+ */
+ void
+ compress();
+ /** @} */
+
+ /**
+ * @name Constructors and initialization using an IndexSet description
+ */
+ /** @{ */
+
+ /**
+ * Constructor for a square sparsity pattern using an IndexSet and an MPI
+ * communicator for the description of the %parallel partitioning.
+ * Moreover, the number of nonzero entries in the rows of the sparsity
+ * pattern can be specified. Note that this number does not need to be
+ * exact, and it is even allowed that the actual sparsity structure has
+ * more nonzero entries than specified in the constructor. However it is
+ * still advantageous to provide good estimates here since a good value
+ * will avoid repeated allocation of memory, which considerably increases
+ * the performance when creating the sparsity pattern.
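+ *
+ * A hedged usage sketch, assuming a DoFHandler object named
+ * <tt>dof_handler</tt>:
+ * @code
+ * const IndexSet locally_owned = dof_handler.locally_owned_dofs();
+ * LinearAlgebra::TpetraWrappers::SparsityPattern<NodeType> sp(
+ *   locally_owned, MPI_COMM_WORLD);
+ * DoFTools::make_sparsity_pattern(dof_handler, sp);
+ * sp.compress();
+ * @endcode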
+ */
+ SparsityPattern(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * Same as before, but now use the exact number of nonzero entries in
+ * each row. Since we know the number of elements in the sparsity
+ * pattern exactly in this case, we can already allocate the right amount
+ * of memory, which makes the creation process by the respective
+ * SparsityPattern<NodeType>::reinit call considerably faster. However,
+ * this is a rather unusual situation, since knowing the number of entries
+ * in each row is usually connected to knowing the indices of nonzero
+ * entries, which the sparsity pattern is designed to describe.
+ */
+ SparsityPattern(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row);
+
+ /**
+ * This constructor is similar to the one above, but it now takes two
+ * different index sets to describe the %parallel partitioning of rows and
+ * columns. This interface is meant to be used for generating rectangular
+ * sparsity patterns. Note that there is no real parallelism along the
+ * columns – the processor that owns a certain row always owns all
+ * the column elements, no matter how far they might be spread out. The
+ * second Tpetra::Map is only used to specify the number of columns and
+ * for internal arrangements when doing matrix-vector products with
+ * vectors based on that column map.
+ *
+ * The number of column entries per row is specified as the maximum
+ * number of entries argument.
+ */
+ SparsityPattern(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * This constructor is similar to the one above, but it now takes two
+ * different index sets for rows and columns. This interface is meant to
+ * be used for generating rectangular matrices, where one map specifies
+ * the %parallel distribution of rows and the second one specifies the
+ * distribution of degrees of freedom associated with matrix columns. This
+ * second map is however not used for the distribution of the columns
+ * themselves – rather, all column elements of a row are stored on
+ * the same processor. The vector <tt>n_entries_per_row</tt> specifies the
+ * number of entries in each row of the newly generated matrix.
+ */
+ SparsityPattern(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row);
+
+ /**
+ * This constructor constructs general sparsity patterns, possibly non-
+ * square ones. Constructing a sparsity pattern this way allows the user
+ * to explicitly specify the rows into which we are going to add elements.
+ * This set is required to be a superset of the first index set @p
+ * row_parallel_partitioning and may also include rows that are owned by
+ * another processor (ghost rows). Note that elements can only be added to
+ * rows specified by @p writable_rows.
+ *
+ * This method is beneficial when the rows to which a processor is going
+ * to write can be determined before actually inserting elements into the
+ * matrix. For the typical parallel::distributed::Triangulation class used
+ * in deal.II, we know that a processor only will add row elements for
+ * what we call the locally relevant dofs (see
+ * DoFTools::extract_locally_relevant_dofs). The other constructors
+ * use general Trilinos facilities that allow adding elements to
+ * arbitrary rows (as done by all the other reinit functions). However,
+ * this flexibility comes at a cost, the most prominent being that adding
+ * elements into the same matrix from multiple threads in shared memory is
+ * not safe whenever MPI is used. For these settings, the current method
+ * is the one to choose: It will store the off-processor data as an
+ * additional sparsity pattern (that is then passed to the Trilinos matrix
+ * via the reinit method) which can be organized in such a way that
+ * thread-safety can be ensured (as long as the user makes sure to never
+ * write into the same matrix row simultaneously, of course).
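+ *
+ * A sketch of the intended use, assuming a DoFHandler object named
+ * <tt>dof_handler</tt>:
+ * @code
+ * const IndexSet owned    = dof_handler.locally_owned_dofs();
+ * const IndexSet relevant =
+ *   DoFTools::extract_locally_relevant_dofs(dof_handler);
+ *
+ * // The writable rows must contain at least the locally owned rows:
+ * LinearAlgebra::TpetraWrappers::SparsityPattern<NodeType> sp(
+ *   owned, owned, relevant, MPI_COMM_WORLD);
+ * @endcode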
+ */
+ SparsityPattern(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const IndexSet &writable_rows,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * Reinitialization function for generating a square sparsity pattern
+ * using an IndexSet and an MPI communicator for the description of the
+ * %parallel partitioning and the number of nonzero entries in the rows of
+ * the sparsity pattern. Note that this number does not need to be exact,
+ * and it is even allowed that the actual sparsity structure has more
+ * nonzero entries than specified in the constructor. However, it is still
+ * advantageous to provide good estimates here since this will
+ * considerably increase the performance when creating the sparsity
+ * pattern.
+ *
+ * This function does not create any entries by itself, but provides the
+ * correct data structures that can be used by the respective add()
+ * function.
+ */
+ void
+ reinit(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * Same as before, but now use the exact number of nonzero entries in
+ * each row. Since we know the number of elements in the sparsity pattern
+ * exactly in this case, we can already allocate the right amount of
+ * memory, which makes the process of adding entries to the sparsity pattern
+ * considerably faster. However, this is a rather unusual situation, since
+ * knowing the number of entries in each row is usually connected to
+ * knowing the indices of nonzero entries, which the sparsity pattern is
+ * designed to describe.
+ */
+ void
+ reinit(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row);
+
+ /**
+ * This reinit function is similar to the one above, but it now takes two
+ * different index sets for rows and columns. This interface is meant to
+ * be used for generating rectangular sparsity patterns, where one index
+ * set describes the %parallel partitioning of the dofs associated with
+ * the sparsity pattern rows and the other one of the sparsity pattern
+ * columns. Note that there is no real parallelism along the columns
+ * – the processor that owns a certain row always owns all the
+ * column elements, no matter how far they might be spread out. The second
+ * IndexSet is only used to specify the number of columns and for internal
+ * arrangements when doing matrix-vector products with vectors based on a
+ * Tpetra::Map built from that IndexSet.
+ *
+ * The number of column entries per row is specified by the argument
+ * <tt>n_entries_per_row</tt>.
+ */
+ void
+ reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * This reinit function is used to specify general matrices, possibly non-
+ * square ones. In addition to the arguments of the other reinit method
+ * above, it allows the user to explicitly specify the rows into which we
+ * are going to add elements. This set is a superset of the first index
+ * set @p row_parallel_partitioning and may also include rows that are owned
+ * by another processor (ghost rows).
+ *
+ * This method is beneficial when the rows to which a processor is going
+ * to write can be determined before actually inserting elements into the
+ * matrix. For the typical parallel::distributed::Triangulation class used
+ * in deal.II, we know that a processor only will add row elements for
+ * what we call the locally relevant dofs (see
+ * DoFTools::extract_locally_relevant_dofs). Trilinos matrices allow
+ * adding elements to arbitrary rows, and this is what all the other
+ * reinit methods do, too.
+ * However, this flexibility comes at a cost, the most prominent being that
+ * adding elements into the same matrix from multiple threads in shared
+ * memory is not safe whenever MPI is used. For these settings, the
+ * current method is the one to choose: It will store the off-processor
+ * data as an additional sparsity pattern (that is then passed to the
+ * Trilinos matrix via the reinit method) which can be organized in such a
+ * way that thread-safety can be ensured (as long as the user makes sure
+ * to never write into the same matrix row simultaneously, of course).
+ */
+ void
+ reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const IndexSet &writable_rows,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const size_type n_entries_per_row = 0);
+
+ /**
+ * Same as before, but now using a vector <tt>n_entries_per_row</tt> for
+ * specifying the number of entries in each row of the sparsity pattern.
+ */
+ void
+ reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row);
+
+ /**
+ * Reinit function. Takes one of the deal.II sparsity patterns and the
+ * %parallel partitioning of the rows and columns specified by two index
+ * sets and a %parallel communicator for initializing the current Trilinos
+ * sparsity pattern. The optional argument @p exchange_data can be used
+ * for reinitialization with a sparsity pattern that is not fully
+ * constructed. This feature is only implemented for input sparsity
+ * patterns of type DynamicSparsityPattern.
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const SparsityPatternType &nontrilinos_sparsity_pattern,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+
+ /**
+ * Reinit function. Takes one of the deal.II sparsity patterns and a
+ * %parallel partitioning of the rows and columns for initializing the
+ * current Trilinos sparsity pattern. The optional argument @p
+ * exchange_data can be used for reinitialization with a sparsity pattern
+ * that is not fully constructed. This feature is only implemented for
+ * input sparsity patterns of type DynamicSparsityPattern.
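+ *
+ * For example (a sketch; <tt>owned</tt> denotes the locally owned index
+ * set and <tt>sp</tt> an object of the current class):
+ * @code
+ * DynamicSparsityPattern dsp(owned.size());
+ * // ... fill dsp, e.g., via DoFTools::make_sparsity_pattern() ...
+ * sp.reinit(owned, dsp, MPI_COMM_WORLD, true); // exchange_data = true
+ * @endcode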
+ */
+ template <typename SparsityPatternType>
+ void
+ reinit(const IndexSet ¶llel_partitioning,
+ const SparsityPatternType &nontrilinos_sparsity_pattern,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+ /** @} */
+ /**
+ * @name Information on the sparsity pattern
+ */
+ /** @{ */
+
+ /**
+ * Return the state of the sparsity pattern, i.e., whether compress()
+ * needs to be called after an operation requiring data exchange.
+ */
+ bool
+ is_compressed() const;
+
+ /**
+ * Return the maximum number of entries per row on the current processor.
+ */
+ unsigned int
+ max_entries_per_row() const;
+
+ /**
+ * Return the local dimension of the sparsity pattern, i.e., the number of
+ * rows stored on the present MPI process. In the sequential case, this
+ * number is the same as n_rows(), but for parallel matrices it may be
+ * smaller.
+ *
+ * To figure out which elements exactly are stored locally, use
+ * local_range().
+ */
+ unsigned int
+ local_size() const;
+
+ /**
+ * Return a pair of indices indicating which rows of this sparsity pattern
+ * are stored locally. The first number is the index of the first row
+ * stored, the second the index of the one past the last one that is
+ * stored locally. If this is a sequential matrix, then the result will be
+ * the pair (0,n_rows()), otherwise it will be a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
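+ *
+ * For example (a sketch):
+ * @code
+ * const auto range = sp.local_range();
+ * for (auto row = range.first; row < range.second; ++row)
+ *   {
+ *     // `row` is one of the rows stored on the current process
+ *   }
+ * @endcode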
+ */
+ std::pair<size_type, size_type>
+ local_range() const;
+
+ /**
+ * Return whether @p index is in the local range or not, see also
+ * local_range().
+ */
+ bool
+ in_local_range(const size_type index) const;
+
+ /**
+ * Return the number of nonzero elements of this sparsity pattern.
+ */
+ std::uint64_t
+ n_nonzero_elements() const;
+
+ /**
+ * Return the number of entries in the given row.
+ *
+ * In a parallel context, the row in question may of course not be
+ * stored on the current processor, and in that case it is not
+ * possible to query the number of entries in it. In that case,
+ * the returned value is `static_cast<size_type>(-1)`.
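+ *
+ * For example (a sketch):
+ * @code
+ * const auto n_entries = sp.row_length(row);
+ * if (n_entries != static_cast<size_type>(-1))
+ *   {
+ *     // the row is stored locally and has n_entries entries
+ *   }
+ * @endcode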
+ */
+ size_type
+ row_length(const size_type row) const;
+
+ /**
+ * Compute the bandwidth of the matrix represented by this structure. The
+ * bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$
+ * represents a nonzero entry of the matrix. Consequently, the maximum
+ * bandwidth an $n\times m$ matrix can have is $\max\{n-1,m-1\}$.
+ */
+ size_type
+ bandwidth() const;
+
+ /**
+ * Return whether the object is empty. It is empty if no memory is
+ * allocated, which is the same as when both dimensions are zero.
+ */
+ bool
+ empty() const;
+
+ /**
+ * Return whether the index (<i>i,j</i>) exists in the sparsity pattern
+ * (i.e., it may be nonzero) or not.
+ */
+ bool
+ exists(const size_type i, const size_type j) const;
+
+ /**
+ * Return whether a given @p row is stored in the current object
+ * on this process.
+ */
+ bool
+ row_is_stored_locally(const size_type i) const;
+
+ /**
+ * Determine an estimate for the memory consumption (in bytes) of this
+ * object. Currently not implemented for this class.
+ */
+ std::size_t
+ memory_consumption() const;
+
+ /** @} */
+ /**
+ * @name Adding entries
+ */
+ /** @{ */
+ /**
+ * Add the element (<i>i,j</i>) to the sparsity pattern.
+ */
+ void
+ add(const size_type i, const size_type j);
+
+
+ /**
+ * Add several elements in one row to the sparsity pattern.
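+ *
+ * For example (a sketch):
+ * @code
+ * const std::vector<types::global_dof_index> cols = {2, 5, 7};
+ * sp.add_entries(42, cols.begin(), cols.end(), true); // indices sorted
+ * @endcode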
+ */
+ template <typename ForwardIterator>
+ void
+ add_entries(const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
+
+ virtual void
+ add_row_entries(
+ const dealii::types::global_dof_index &row,
+ const ArrayView<const dealii::types::global_dof_index> &columns,
+ const bool indices_are_sorted = false) override;
+
+ using SparsityPatternBase::add_entries;
+
+ /** @} */
+ /**
+ * @name Access of underlying Trilinos data
+ */
+ /** @{ */
+
+ /**
+ * Return a Teuchos::RCP to the underlying Trilinos Tpetra::CrsGraph
+ * data that stores the sparsity pattern.
+ */
+ Teuchos::RCP<GraphType>
+ trilinos_sparsity_pattern() const;
+
+ /**
+ * Return a const Teuchos::RCP to the underlying Trilinos Tpetra::Map that
+ * sets the parallel partitioning of the domain space of this sparsity
+ * pattern, i.e., the partitioning of the vectors that matrices based on
+ * this sparsity pattern are multiplied with.
+ */
+ Teuchos::RCP<const MapType>
+ domain_partitioner() const;
+
+ /**
+ * Return a const Teuchos::RCP to the underlying Trilinos Tpetra::Map that
+ * sets the partitioning of the range space of this sparsity pattern,
+ * i.e., the partitioning of the vectors that result from matrix-
+ * vector products.
+ */
+ Teuchos::RCP<const MapType>
+ range_partitioner() const;
+
+ /**
+ * Return the underlying MPI communicator.
+ */
+ MPI_Comm
+ get_mpi_communicator() const;
+
+ /**
+ * Return the underlying Teuchos::MPI communicator.
+ */
+ Teuchos::RCP<const Teuchos::Comm<int>>
+ get_teuchos_mpi_communicator() const;
+
+ /** @} */
+
+ /**
+ * @name Partitioners
+ */
+ /** @{ */
+
+ /**
+ * Return the partitioning of the domain space of this pattern, i.e., the
+ * partitioning of the vectors a matrix based on this sparsity pattern has
+ * to be multiplied with.
+ */
+ IndexSet
+ locally_owned_domain_indices() const;
+
+ /**
+ * Return the partitioning of the range space of this pattern, i.e., the
+ * partitioning of the vectors that are the result from matrix-vector
+ * products from a matrix based on this pattern.
+ */
+ IndexSet
+ locally_owned_range_indices() const;
+
+ /** @} */
+
+ /**
+ * @name Iterators
+ */
+ /** @{ */
+
+ /**
+ * Iterator starting at the first entry.
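+ *
+ * Together with end(), this allows looping over all locally stored
+ * entries (a sketch):
+ * @code
+ * for (auto it = sp.begin(); it != sp.end(); ++it)
+ *   std::cout << it->row() << ' ' << it->column() << std::endl;
+ * @endcode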
+ */
+ const_iterator
+ begin() const;
+
+ /**
+ * Final iterator.
+ */
+ const_iterator
+ end() const;
+
+ /**
+ * Iterator starting at the first entry of row @p r.
+ *
+ * Note that if the given row is empty, i.e., does not contain any nonzero
+ * entries, then the iterator returned by this function equals
+ * <tt>end(r)</tt>. Note also that the iterator may not be dereferenceable
+ * in that case.
+ */
+ const_iterator
+ begin(const size_type r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>. It points to the first element past
+ * the end of line @p r, or past the end of the entire sparsity pattern.
+ *
+ * Note that the end iterator is not necessarily dereferenceable. This is
+ * in particular the case if it is the end iterator for the last row of a
+ * matrix.
+ */
+ const_iterator
+ end(const size_type r) const;
+
+ /** @} */
+ /**
+ * @name Input/Output
+ */
+ /** @{ */
+
+ /**
+ * Print (the locally owned part of) the sparsity pattern to the given
+ * stream, using the format <tt>(line,col)</tt>. The optional flag outputs
+ * the sparsity pattern in Trilinos style, where additionally the owning
+ * processor number is printed to the stream, as well as a summary before
+ * actually writing the entries.
+ */
+ void
+ print(std::ostream &out,
+ const bool write_extended_trilinos_info = false) const;
+
+ /**
+ * Print the sparsity of the matrix in a format that <tt>gnuplot</tt>
+ * understands and which can be used to plot the sparsity pattern in a
+ * graphical way. The format consists of pairs <tt>i j</tt> of nonzero
+ * elements, each representing one entry of this matrix, one per line of
+ * the output file. Indices are counted from zero on, as usual. Since
+ * sparsity patterns are printed in the same way as matrices are
+ * displayed, we print the negative of the column index, which means that
+ * the <tt>(0,0)</tt> element is in the top left rather than in the bottom
+ * left corner.
+ *
+ * Print the sparsity pattern in gnuplot by setting the data style to dots
+ * or points and using the <tt>plot</tt> command.
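+ *
+ * Writing the data to a file for gnuplot could look like this
+ * (a sketch):
+ * @code
+ * std::ofstream out("sparsity_pattern.gpl");
+ * sp.print_gnuplot(out);
+ * @endcode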
+ */
+ void
+ print_gnuplot(std::ostream &out) const;
+
+ /** @} */
+ /**
+ * @addtogroup Exceptions
+ * @{
+ */
+ /**
+ * Exception
+ */
+ DeclException1(ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ /**
+ * Exception
+ */
+ DeclException2(ExcInvalidIndex,
+ size_type,
+ size_type,
+ << "The entry with index <" << arg1 << ',' << arg2
+ << "> does not exist.");
+
+ /**
+ * Exception
+ */
+ DeclException4(ExcAccessToNonLocalElement,
+ size_type,
+ size_type,
+ size_type,
+ size_type,
+ << "You tried to access element (" << arg1 << '/' << arg2
+ << ')'
+ << " of a distributed matrix, but only rows in range ["
+ << arg3 << ',' << arg4
+ << "] are stored locally and can be accessed.");
+
+ /**
+ * Exception
+ */
+ DeclException2(ExcAccessToNonPresentElement,
+ size_type,
+ size_type,
+ << "You tried to access element (" << arg1 << '/' << arg2
+ << ')' << " of a sparse matrix, but it appears to not"
+ << " exist in the Trilinos sparsity pattern.");
+ /** @} */
+ private:
+ /**
+ * Teuchos::RCP to the user-supplied Trilinos Tpetra::Map of the matrix
+ * columns that assigns parts of the matrix to the individual processes.
+ */
+ Teuchos::RCP<MapType> column_space_map;
+
+ /**
+ * A sparsity pattern object in Trilinos to be used for finite element
+ * based problems which allows for adding non-local elements to the
+ * pattern.
+ */
+ Teuchos::RCP<GraphType> graph;
+
+ /**
+ * A sparsity pattern object for the non-local part of the sparsity
+ * pattern that is going to be sent to the owning processor. Only used
+ * when the particular constructor or reinit method with a writable_rows
+ * argument has been used.
+ */
+ Teuchos::RCP<GraphType> nonlocal_graph;
+
+ // TODO: currently only for double
+ friend class SparseMatrix<double, NodeType>;
+ friend class SparsityPatternIterators::Accessor<NodeType>;
+ friend class SparsityPatternIterators::Iterator<NodeType>;
+ };
+
+
+
+ // ---------------- inline and template functions -----------------
+
+
+# ifndef DOXYGEN
+
+ namespace SparsityPatternIterators
+ {
+ template <typename NodeType>
+ inline Accessor<NodeType>::Accessor(const SparsityPattern<NodeType> *sp,
+ const size_type row,
+ const size_type index)
+ : sparsity_pattern(const_cast<SparsityPattern<NodeType> *>(sp))
+ , a_row(row)
+ , a_index(index)
+ {
+ visit_present_row();
+ }
+
+
+
+ template <typename NodeType>
+ inline typename Accessor<NodeType>::size_type
+ Accessor<NodeType>::row() const
+ {
+ Assert(a_row < sparsity_pattern->n_rows(),
+ ExcBeyondEndOfSparsityPattern());
+ return a_row;
+ }
+
+
+
+ template <typename NodeType>
+ inline typename Accessor<NodeType>::size_type
+ Accessor<NodeType>::column() const
+ {
+ Assert(a_row < sparsity_pattern->n_rows(),
+ ExcBeyondEndOfSparsityPattern());
+ return (*colnum_cache)[a_index];
+ }
+
+
+
+ template <typename NodeType>
+ inline typename Accessor<NodeType>::size_type
+ Accessor<NodeType>::index() const
+ {
+ Assert(a_row < sparsity_pattern->n_rows(),
+ ExcBeyondEndOfSparsityPattern());
+ return a_index;
+ }
+
+
+
+ template <typename NodeType>
+ inline Iterator<NodeType>::Iterator(const SparsityPattern<NodeType> *sp,
+ const size_type row,
+ const size_type index)
+ : accessor(sp, row, index)
+ {}
+
+
+
+ template <typename NodeType>
+ inline Iterator<NodeType>::Iterator(const Iterator<NodeType> &) = default;
+
+
+
+ template <typename NodeType>
+ inline Iterator<NodeType> &
+ Iterator<NodeType>::operator++()
+ {
+ Assert(accessor.a_row < accessor.sparsity_pattern->n_rows(),
+ ExcIteratorPastEnd());
+
+ ++accessor.a_index;
+
+ // If at end of line: do one step, then cycle until we find a row with a
+ // nonzero number of entries that is stored locally.
+ if (accessor.a_index >=
+ static_cast<dealii::types::signed_global_dof_index>(
+ accessor.colnum_cache->size()))
+ {
+ accessor.a_index = 0;
+ ++accessor.a_row;
+
+ while (accessor.a_row <
+ static_cast<dealii::types::signed_global_dof_index>(
+ accessor.sparsity_pattern->n_rows()))
+ {
+ const auto row_length =
+ accessor.sparsity_pattern->row_length(accessor.a_row);
+ if (row_length == 0 ||
+ !accessor.sparsity_pattern->row_is_stored_locally(
+ accessor.a_row))
+ ++accessor.a_row;
+ else
+ break;
+ }
+
+ accessor.visit_present_row();
+ }
+ return *this;
+ }
+
+
+
+ template <typename NodeType>
+ inline Iterator<NodeType>
+ Iterator<NodeType>::operator++(int)
+ {
+ const Iterator<NodeType> old_state = *this;
+ ++(*this);
+ return old_state;
+ }
+
+
+
+ template <typename NodeType>
+ inline const Accessor<NodeType> &
+ Iterator<NodeType>::operator*() const
+ {
+ return accessor;
+ }
+
+
+
+ template <typename NodeType>
+ inline const Accessor<NodeType> *
+ Iterator<NodeType>::operator->() const
+ {
+ return &accessor;
+ }
+
+
+
+ template <typename NodeType>
+ inline bool
+ Iterator<NodeType>::operator==(const Iterator<NodeType> &other) const
+ {
+ return (accessor.a_row == other.accessor.a_row &&
+ accessor.a_index == other.accessor.a_index);
+ }
+
+
+
+ template <typename NodeType>
+ inline bool
+ Iterator<NodeType>::operator!=(const Iterator<NodeType> &other) const
+ {
+ return !(*this == other);
+ }
+
+
+
+ template <typename NodeType>
+ inline bool
+ Iterator<NodeType>::operator<(const Iterator<NodeType> &other) const
+ {
+ return (accessor.row() < other.accessor.row() ||
+ (accessor.row() == other.accessor.row() &&
+ accessor.index() < other.accessor.index()));
+ }
+
+ } // namespace SparsityPatternIterators
+
+
+
+ template <typename NodeType>
+ inline typename SparsityPattern<NodeType>::const_iterator
+ SparsityPattern<NodeType>::begin() const
+ {
+ const size_type first_valid_row = this->local_range().first;
+ return const_iterator(this, first_valid_row, 0);
+ }
+
+
+
+ template <typename NodeType>
+ inline typename SparsityPattern<NodeType>::const_iterator
+ SparsityPattern<NodeType>::end() const
+ {
+ return const_iterator(this, n_rows(), 0);
+ }
+
+
+
+ template <typename NodeType>
+ inline typename SparsityPattern<NodeType>::const_iterator
+ SparsityPattern<NodeType>::begin(const size_type r) const
+ {
+ AssertIndexRange(r, n_rows());
+ if (row_length(r) > 0)
+ return const_iterator(this, r, 0);
+ else
+ return end(r);
+ }
+
+
+
+ template <typename NodeType>
+ inline typename SparsityPattern<NodeType>::const_iterator
+ SparsityPattern<NodeType>::end(const size_type r) const
+ {
+ AssertIndexRange(r, n_rows());
+
+ // Place the iterator on the first entry past this line, or at the
+ // end of the matrix.
+ for (size_type i = r + 1;
+ i < static_cast<dealii::types::signed_global_dof_index>(n_rows());
+ ++i)
+ if (row_length(i) > 0)
+ return const_iterator(this, i, 0);
+
+ // if there is no such line, then take the
+ // end iterator of the matrix
+ return end();
+ }
+
+
+
+ template <typename NodeType>
+ inline bool
+ SparsityPattern<NodeType>::in_local_range(const size_type index) const
+ {
+ const TrilinosWrappers::types::int_type begin =
+ graph->getRowMap()->getMinGlobalIndex();
+ const TrilinosWrappers::types::int_type end =
+ graph->getRowMap()->getMaxGlobalIndex() + 1;
+
+ return ((index >= static_cast<size_type>(begin)) &&
+ (index < static_cast<size_type>(end)));
+ }
+
+
+
+ template <typename NodeType>
+ inline bool
+ SparsityPattern<NodeType>::is_compressed() const
+ {
+ return graph->isFillComplete();
+ }
+
+
+
+ template <typename NodeType>
+ inline bool
+ SparsityPattern<NodeType>::empty() const
+ {
+ return ((n_rows() == 0) && (n_cols() == 0));
+ }
+
+
+
+ template <typename NodeType>
+ inline void
+ SparsityPattern<NodeType>::add(const size_type i, const size_type j)
+ {
+ add_entries(i, &j, &j + 1);
+ }
+
+
+
+ template <typename NodeType>
+ template <typename ForwardIterator>
+ inline void
+ SparsityPattern<NodeType>::add_entries(const size_type row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool /*indices_are_sorted*/)
+ {
+ if (begin == end)
+ return;
+
+ // Verify that the size of the data type Trilinos expects matches the one
+ // the iterator points to. We allow for some slippage between signed and
+ // unsigned and only compare that they are both either 32 or 64 bit. To
+ // write this test properly, note that we cannot compare the size of
+ // '*begin' because 'begin' may be an iterator and '*begin' may be an
+ // accessor class. Consequently, we need to somehow get an actual value
+ // from it, which we can do by evaluating an expression such as
+ // multiplying the value by 2.
+ Assert(sizeof(TrilinosWrappers::types::int_type) == sizeof((*begin) * 2),
+ ExcNotImplemented());
+
+ const TrilinosWrappers::types::int_type *col_index_ptr_begin =
+ reinterpret_cast<TrilinosWrappers::types::int_type *>(
+ const_cast<typename std::decay<decltype(*begin)>::type *>(&*begin));
+
+ const TrilinosWrappers::types::int_type *col_index_ptr_end =
+ reinterpret_cast<TrilinosWrappers::types::int_type *>(
+ const_cast<typename std::decay<decltype(*end)>::type *>(&*end));
+
+ // Check at least for the first index that the conversion actually works
+ AssertDimension(*col_index_ptr_begin, *begin);
+ AssertDimension(*col_index_ptr_end, *end);
+ TrilinosWrappers::types::int_type trilinos_row_index = row;
+
+ Teuchos::Array<long long> array(col_index_ptr_begin, col_index_ptr_end);
+
+ if (row_is_stored_locally(row))
+ graph->insertGlobalIndices(trilinos_row_index, array());
+ else if (nonlocal_graph.get() != nullptr)
+ {
+ // this is the case when we have explicitly set the off-processor rows
+ // and want to create a separate matrix object for them (to retain
+ // thread-safety)
+ Assert(nonlocal_graph->getRowMap()->getLocalElement(row) !=
+ Teuchos::OrdinalTraits<
+ dealii::types::signed_global_dof_index>::invalid(),
+ ExcMessage("Attempted to write into off-processor matrix row "
+ "that has not be specified as being writable upon "
+ "initialization"));
+ nonlocal_graph->insertGlobalIndices(trilinos_row_index, array);
+ }
+ else
+ graph->insertGlobalIndices(trilinos_row_index, array);
+ }
+
+
+
+ template <typename NodeType>
+ inline Teuchos::RCP<
+ Tpetra::CrsGraph<int, dealii::types::signed_global_dof_index, NodeType>>
+ SparsityPattern<NodeType>::trilinos_sparsity_pattern() const
+ {
+ return graph;
+ }
+
+
+
+ template <typename NodeType>
+ inline IndexSet
+ SparsityPattern<NodeType>::locally_owned_domain_indices() const
+ {
+ return IndexSet(graph->getDomainMap().getConst());
+ }
+
+
+
+ template <typename NodeType>
+ inline IndexSet
+ SparsityPattern<NodeType>::locally_owned_range_indices() const
+ {
+ return IndexSet(graph->getRangeMap().getConst());
+ }
+
+# endif // DOXYGEN
+ } // namespace TpetraWrappers
+
+} // namespace LinearAlgebra
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+
+#endif // DEAL_II_TRILINOS_WITH_TPETRA
+
+#endif
trilinos_sparse_matrix.cc
trilinos_sparsity_pattern.cc
trilinos_tpetra_communication_pattern.cc
+ trilinos_tpetra_sparse_matrix.cc
+ trilinos_tpetra_sparsity_pattern.cc
trilinos_tpetra_vector.cc
trilinos_vector.cc
)
MatrixBlock<SparseMatrix<S>> &) const;
}
+// ---------------------------------------------------------------------
+//
+// Tpetra:
+//
+// ---------------------------------------------------------------------
+
+for (S : TRILINOS_SCALARS)
+ {
+ template void AffineConstraints<S>::distribute_local_to_global<
+ LinearAlgebra::TpetraWrappers::SparseMatrix<S>,
+ LinearAlgebra::TpetraWrappers::Vector<S>>(
+ const FullMatrix<S> &,
+ const Vector<S> &,
+ const std::vector<AffineConstraints<S>::size_type> &,
+ LinearAlgebra::TpetraWrappers::SparseMatrix<S> &,
+ LinearAlgebra::TpetraWrappers::Vector<S> &,
+ bool,
+ std::integral_constant<bool, false>) const;
+ }
+
+
// ---------------------------------------------------------------------
//
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+# include <deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+# ifndef DOXYGEN
+// explicit instantiations
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ template class SparseMatrix<double>;
+
+ template void
+ SparseMatrix<double>::reinit(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const dealii::DynamicSparsityPattern &sparsity_pattern,
+ const MPI_Comm communicator,
+ const bool exchange_data);
+
+ } // namespace TpetraWrappers
+} // namespace LinearAlgebra
+# endif // DOXYGEN
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_TRILINOS_WITH_TPETRA
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+# include <deal.II/base/mpi.h>
+# include <deal.II/base/trilinos_utilities.h>
+
+# include <deal.II/lac/dynamic_sparsity_pattern.h>
+# include <deal.II/lac/sparsity_pattern.h>
+# include <deal.II/lac/trilinos_index_access.h>
+# include <deal.II/lac/trilinos_tpetra_sparsity_pattern.h>
+
+# include <limits>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+
+ namespace TpetraWrappers
+ {
+ namespace SparsityPatternIterators
+ {
+ template <typename NodeType>
+ void
+ Accessor<NodeType>::visit_present_row()
+ {
+ // if we are asked to visit the past-the-end line, then simply
+ // release all our caches and go on with life
+ if (static_cast<size_t>(this->a_row) == sparsity_pattern->n_rows())
+ {
+ colnum_cache.reset();
+ return;
+ }
+
+ // otherwise first flush Trilinos caches if necessary
+ if (!sparsity_pattern->is_compressed())
+ sparsity_pattern->compress();
+
+ colnum_cache =
+ std::make_shared<std::vector<dealii::types::signed_global_dof_index>>(
+ sparsity_pattern->row_length(this->a_row));
+
+ if (colnum_cache->size() > 0)
+ {
+ // get a representation of the present row
+ std::size_t ncols;
+ typename Tpetra::CrsGraph<
+ int,
+ dealii::types::signed_global_dof_index,
+ NodeType>::nonconst_global_inds_host_view_type
+ column_indices_view(colnum_cache->data(), colnum_cache->size());
+ sparsity_pattern->graph->getGlobalRowCopy(this->a_row,
+ column_indices_view,
+ ncols);
+ AssertThrow(ncols == colnum_cache->size(), ExcInternalError());
+ }
+ }
+ } // namespace SparsityPatternIterators
+
+
+ // The constructor is actually the only point where we have to check whether
+ // we build a serial or a parallel Trilinos matrix. Actually, it does not
+ // even matter how many threads there are, but only whether we use an MPI
+ // compiler or a standard compiler. So, even one thread on a configuration
+ // with MPI will still get a parallel interface.
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern()
+ {
+ column_space_map = Utilities::Trilinos::internal::make_rcp<MapType>(
+ TrilinosWrappers::types::int_type(0),
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::tpetra_comm_self());
+ graph =
+ Utilities::Trilinos::internal::make_rcp<GraphType>(column_space_map,
+ column_space_map,
+ 0);
+ graph->fillComplete();
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row)
+ {
+ reinit(m, n, n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row)
+ {
+ reinit(m, n, n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ SparsityPattern<NodeType> &&other) noexcept
+ : SparsityPatternBase(std::move(other))
+ , column_space_map(std::move(other.column_space_map))
+ , graph(std::move(other.graph))
+ , nonlocal_graph(std::move(other.nonlocal_graph))
+ {}
+
+
+
+ // Copy function only works if the sparsity pattern is empty.
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const SparsityPattern<NodeType> &input_sparsity)
+ : SparsityPatternBase(input_sparsity)
+ , column_space_map(Utilities::Trilinos::internal::make_rcp<MapType>(
+ 0,
+ 0,
+ Utilities::Trilinos::tpetra_comm_self()))
+ , graph(
+ Utilities::Trilinos::internal::make_rcp<GraphType>(column_space_map,
+ column_space_map,
+ 0))
+ {
+ (void)input_sparsity;
+ Assert(input_sparsity.n_rows() == 0,
+ ExcMessage(
+ "Copy constructor only works for empty sparsity patterns."));
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const size_type n_entries_per_row)
+ {
+ reinit(parallel_partitioning,
+ parallel_partitioning,
+ communicator,
+ n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row)
+ {
+ reinit(parallel_partitioning,
+ parallel_partitioning,
+ communicator,
+ n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const size_type n_entries_per_row)
+ {
+ reinit(row_parallel_partitioning,
+ col_parallel_partitioning,
+ communicator,
+ n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row)
+ {
+ reinit(row_parallel_partitioning,
+ col_parallel_partitioning,
+ communicator,
+ n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType>::SparsityPattern(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const IndexSet &writable_rows,
+ const MPI_Comm communicator,
+ const size_type n_max_entries_per_row)
+ {
+ reinit(row_parallel_partitioning,
+ col_parallel_partitioning,
+ writable_rows,
+ communicator,
+ n_max_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row)
+ {
+ reinit(complete_index_set(m),
+ complete_index_set(n),
+ MPI_COMM_SELF,
+ n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(
+ const size_type m,
+ const size_type n,
+ const std::vector<size_type> &n_entries_per_row)
+ {
+ reinit(complete_index_set(m),
+ complete_index_set(n),
+ MPI_COMM_SELF,
+ n_entries_per_row);
+ }
+
+
+
+ namespace
+ {
+ template <typename NodeType>
+ using size_type = typename SparsityPattern<NodeType>::size_type;
+
+ template <typename NodeType>
+ using MapType =
+ Tpetra::Map<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ template <typename NodeType>
+ using GraphType =
+ Tpetra::CrsGraph<int, dealii::types::signed_global_dof_index, NodeType>;
+
+ template <typename NodeType>
+ void
+ reinit_sp(const Teuchos::RCP<MapType<NodeType>> &row_map,
+ const Teuchos::RCP<MapType<NodeType>> &col_map,
+ const size_type<NodeType> n_entries_per_row,
+ Teuchos::RCP<MapType<NodeType>> &column_space_map,
+ Teuchos::RCP<GraphType<NodeType>> &graph,
+ Teuchos::RCP<GraphType<NodeType>> &nonlocal_graph)
+ {
+ Assert(row_map->isOneToOne(),
+ ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+ Assert(col_map->isOneToOne(),
+ ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+
+ nonlocal_graph.reset();
+ graph.reset();
+ column_space_map = col_map;
+
+ // For more than one processor, we need to specify only the row map first
+ // and let the matrix entries decide about the column map (which says
+ // which columns are present in the matrix, not to be confused with the
+ // col_map that tells how the domain dofs of the matrix will be
+ // distributed). For only one processor, we can directly assign the
+ // columns as well. If we use a recent Trilinos version, we can also
+ // require building a non-local graph which gives us thread-safe
+ // initialization.
+ graph = Utilities::Trilinos::internal::make_rcp<GraphType<NodeType>>(
+ row_map, row_map, n_entries_per_row);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ reinit_sp(const Teuchos::RCP<MapType<NodeType>> &row_map,
+ const Teuchos::RCP<MapType<NodeType>> &col_map,
+ const std::vector<size_type<NodeType>> &n_entries_per_row,
+ Teuchos::RCP<MapType<NodeType>> &column_space_map,
+ Teuchos::RCP<GraphType<NodeType>> &graph,
+ Teuchos::RCP<GraphType<NodeType>> &nonlocal_graph)
+ {
+ Assert(row_map->isOneToOne(),
+ ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+ Assert(col_map->isOneToOne(),
+ ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+
+ // release memory before reallocation
+ nonlocal_graph.reset();
+ graph.reset();
+ AssertDimension(n_entries_per_row.size(),
+ row_map->getGlobalNumElements());
+
+ column_space_map = col_map;
+
+ // Translate the vector of row lengths into one that only stores
+ // those entries that relate to the locally stored rows of the matrix.
+ // Note that getMaxGlobalIndex() returns an inclusive index, so we need
+ // to add one to get the number of locally stored rows:
+ Kokkos::DualView<size_t *> local_entries_per_row(
+ "local_entries_per_row",
+ row_map->getMaxGlobalIndex() - row_map->getMinGlobalIndex() + 1);
+
+ auto local_entries_per_row_host =
+ local_entries_per_row.view<Kokkos::DefaultHostExecutionSpace>();
+
+ std::uint64_t total_size = 0;
+ for (unsigned int i = 0; i < local_entries_per_row.extent(0); ++i)
+ {
+ local_entries_per_row_host(i) =
+ n_entries_per_row[row_map->getMinGlobalIndex() + i];
+ total_size += local_entries_per_row_host[i];
+ }
+ local_entries_per_row.modify<Kokkos::DefaultHostExecutionSpace>();
+ local_entries_per_row.sync<Kokkos::DefaultExecutionSpace>();
+
+ AssertThrow(
+ total_size < static_cast<std::uint64_t>(
+ std::numeric_limits<
+ dealii::types::signed_global_dof_index>::max()),
+ ExcMessage(
+ "You are requesting to store more elements than global ordinal type allows."));
+
+ graph = Utilities::Trilinos::internal::make_rcp<GraphType<NodeType>>(
+ row_map, col_map, local_entries_per_row);
+ }
+
+
+
+ template <typename SparsityPatternType, typename NodeType>
+ void
+ reinit_sp(const Teuchos::RCP<MapType<NodeType>> &row_map,
+ const Teuchos::RCP<MapType<NodeType>> &col_map,
+ const SparsityPatternType &sp,
+ [[maybe_unused]] const bool exchange_data,
+ Teuchos::RCP<MapType<NodeType>> &column_space_map,
+ Teuchos::RCP<GraphType<NodeType>> &graph,
+ Teuchos::RCP<GraphType<NodeType>> &nonlocal_graph)
+ {
+ nonlocal_graph.reset();
+ graph.reset();
+
+ AssertDimension(sp.n_rows(), row_map->getGlobalNumElements());
+ AssertDimension(sp.n_cols(), col_map->getGlobalNumElements());
+
+ column_space_map =
+ Utilities::Trilinos::internal::make_rcp<MapType<NodeType>>(*col_map);
+
+ Assert(row_map->isContiguous() == true,
+ ExcMessage(
+ "This function only works if the row map is contiguous."));
+
+ const size_type<NodeType> first_row = row_map->getMinGlobalIndex(),
+ last_row = row_map->getMaxGlobalIndex() + 1;
+ Teuchos::Array<size_t> n_entries_per_row(last_row - first_row);
+
+ for (size_type<NodeType> row = first_row; row < last_row; ++row)
+ n_entries_per_row[row - first_row] = sp.row_length(row);
+
+ AssertThrow(
+ std::accumulate(n_entries_per_row.begin(),
+ n_entries_per_row.end(),
+ std::uint64_t(0)) <
+ static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
+ ExcMessage(
+ "The TrilinosWrappers use Tpetra internally, and "
+ "Trilinos/Tpetra was compiled with 'local ordinate = int'. "
+ "Therefore, 'signed int' is used to represent local indices, "
+ "and only 2,147,483,647 nonzero matrix entries can be stored "
+ "on a single process, but you are requesting more than "
+ "that. Either use more MPI processes or recompile Trilinos "
+ "with 'local ordinate = long long' "));
+
+ if (row_map->getComm()->getSize() > 1)
+ graph = Utilities::Trilinos::internal::make_rcp<GraphType<NodeType>>(
+ row_map, n_entries_per_row());
+ else
+ graph = Utilities::Trilinos::internal::make_rcp<GraphType<NodeType>>(
+ row_map, col_map, n_entries_per_row());
+
+ AssertDimension(sp.n_rows(), graph->getGlobalNumRows());
+ AssertDimension(sp.n_cols(), col_map->getGlobalNumElements());
+
+ std::vector<TrilinosWrappers::types::int_type> row_indices;
+
+ for (size_type<NodeType> row = first_row; row < last_row; ++row)
+ {
+ const TrilinosWrappers::types::int_type row_length =
+ sp.row_length(row);
+ if (row_length == 0)
+ continue;
+
+ row_indices.resize(row_length, -1);
+ {
+ typename SparsityPatternType::iterator p = sp.begin(row);
+ // avoid incrementing p over the end of the current row because
+ // it is slow for DynamicSparsityPattern in parallel
+ for (int col = 0; col < row_length;)
+ {
+ row_indices[col++] = p->column();
+ if (col < row_length)
+ ++p;
+ }
+ }
+ graph->insertGlobalIndices(row, row_length, row_indices.data());
+ }
+
+ graph->globalAssemble();
+ }
+ } // namespace
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const size_type n_entries_per_row)
+ {
+ SparsityPatternBase::resize(parallel_partitioning.size(),
+ parallel_partitioning.size());
+ Teuchos::RCP<MapType> map =
+ parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(
+ map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(
+ const IndexSet ¶llel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row)
+ {
+ SparsityPatternBase::resize(parallel_partitioning.size(),
+ parallel_partitioning.size());
+ Teuchos::RCP<MapType> map =
+ parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(
+ map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const size_type n_entries_per_row)
+ {
+ SparsityPatternBase::resize(row_parallel_partitioning.size(),
+ col_parallel_partitioning.size());
+ Teuchos::RCP<MapType> row_map =
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ Teuchos::RCP<MapType> col_map =
+ col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(row_map,
+ col_map,
+ n_entries_per_row,
+ column_space_map,
+ graph,
+ nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm communicator,
+ const std::vector<size_type> &n_entries_per_row)
+ {
+ SparsityPatternBase::resize(row_parallel_partitioning.size(),
+ col_parallel_partitioning.size());
+ Teuchos::RCP<MapType> row_map =
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ Teuchos::RCP<MapType> col_map =
+ col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(row_map,
+ col_map,
+ n_entries_per_row,
+ column_space_map,
+ graph,
+ nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::reinit(const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const IndexSet &writable_rows,
+ const MPI_Comm communicator,
+ const size_type n_entries_per_row)
+ {
+ SparsityPatternBase::resize(row_parallel_partitioning.size(),
+ col_parallel_partitioning.size());
+ Teuchos::RCP<MapType> row_map =
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ Teuchos::RCP<MapType> col_map =
+ col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(row_map,
+ col_map,
+ n_entries_per_row,
+ column_space_map,
+ graph,
+ nonlocal_graph);
+
+ IndexSet nonlocal_partitioner = writable_rows;
+ AssertDimension(nonlocal_partitioner.size(),
+ row_parallel_partitioning.size());
+# ifdef DEBUG
+ {
+ IndexSet tmp = writable_rows & row_parallel_partitioning;
+ Assert(tmp == row_parallel_partitioning,
+ ExcMessage(
+ "The set of writable rows passed to this method does not "
+ "contain the locally owned rows, which is not allowed."));
+ }
+# endif
+ nonlocal_partitioner.subtract_set(row_parallel_partitioning);
+ if (Utilities::MPI::n_mpi_processes(communicator) > 1)
+ {
+ Teuchos::RCP<MapType> nonlocal_map =
+ nonlocal_partitioner.make_tpetra_map_rcp(communicator, true);
+ nonlocal_graph =
+ Utilities::Trilinos::internal::make_rcp<GraphType>(nonlocal_map,
+ col_map,
+ 0);
+ }
+ else
+ Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
+ }
+
+
+
+ template <typename NodeType>
+ template <typename SparsityPatternType>
+ void
+ SparsityPattern<NodeType>::reinit(
+ const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const SparsityPatternType &nontrilinos_sparsity_pattern,
+ const MPI_Comm communicator,
+ const bool exchange_data)
+ {
+ SparsityPatternBase::resize(row_parallel_partitioning.size(),
+ col_parallel_partitioning.size());
+ Teuchos::RCP<MapType> row_map =
+ row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ Teuchos::RCP<MapType> col_map =
+ col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(row_map,
+ col_map,
+ nontrilinos_sparsity_pattern,
+ exchange_data,
+ column_space_map,
+ graph,
+ nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ template <typename SparsityPatternType>
+ void
+ SparsityPattern<NodeType>::reinit(
+ const IndexSet ¶llel_partitioning,
+ const SparsityPatternType &nontrilinos_sparsity_pattern,
+ const MPI_Comm communicator,
+ const bool exchange_data)
+ {
+ AssertDimension(nontrilinos_sparsity_pattern.n_rows(),
+ parallel_partitioning.size());
+ AssertDimension(nontrilinos_sparsity_pattern.n_cols(),
+ parallel_partitioning.size());
+ SparsityPatternBase::resize(parallel_partitioning.size(),
+ parallel_partitioning.size());
+ Teuchos::RCP<MapType> map =
+ parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ reinit_sp(map,
+ map,
+ nontrilinos_sparsity_pattern,
+ exchange_data,
+ column_space_map,
+ graph,
+ nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ SparsityPattern<NodeType> &
+ SparsityPattern<NodeType>::operator=(const SparsityPattern<NodeType> &)
+ {
+ Assert(false, ExcNotImplemented());
+ return *this;
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::copy_from(const SparsityPattern<NodeType> &sp)
+ {
+ SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
+ column_space_map =
+ Utilities::Trilinos::internal::make_rcp<MapType>(*sp.column_space_map);
+ graph = Utilities::Trilinos::internal::make_rcp<GraphType>(*sp.graph);
+
+ if (sp.nonlocal_graph.get() != nullptr)
+ nonlocal_graph = Utilities::Trilinos::internal::make_rcp<GraphType>(
+ *sp.nonlocal_graph);
+ else
+ nonlocal_graph.reset();
+ }
+
+
+
+ template <typename NodeType>
+ template <typename SparsityPatternType>
+ void
+ SparsityPattern<NodeType>::copy_from(const SparsityPatternType &sp)
+ {
+ SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
+ Teuchos::RCP<MapType> rows =
+ Utilities::Trilinos::internal::make_rcp<MapType>(
+ TrilinosWrappers::types::int_type(sp.n_rows()),
+ 0,
+ Utilities::Trilinos::tpetra_comm_self());
+ Teuchos::RCP<MapType> columns =
+ Utilities::Trilinos::internal::make_rcp<MapType>(
+ TrilinosWrappers::types::int_type(sp.n_cols()),
+ 0,
+ Utilities::Trilinos::tpetra_comm_self());
+
+ reinit_sp(
+ rows, columns, sp, false, column_space_map, graph, nonlocal_graph);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::clear()
+ {
+ SparsityPatternBase::resize(0, 0);
+ // When we clear the matrix, reset the pointer and generate an empty
+ // sparsity pattern.
+ column_space_map = Utilities::Trilinos::internal::make_rcp<MapType>(
+ TrilinosWrappers::types::int_type(0),
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::tpetra_comm_self());
+ graph =
+ Utilities::Trilinos::internal::make_rcp<GraphType>(column_space_map,
+ column_space_map,
+ 0);
+ graph->fillComplete();
+
+ nonlocal_graph.reset();
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::compress()
+ {
+ Assert(column_space_map.get(), ExcInternalError());
+ if (nonlocal_graph.get() != nullptr)
+ {
+ if (nonlocal_graph->getRowMap()->getLocalNumElements() > 0 &&
+ column_space_map->getGlobalNumElements() > 0)
+ {
+ // Insert dummy element at (row, column) that corresponds to row 0
+ // in local index counting.
+ TrilinosWrappers::types::int_type row =
+ nonlocal_graph->getRowMap()->getGlobalElement(0);
+ TrilinosWrappers::types::int_type column = 0;
+
+ // in case we have a square sparsity pattern, add the entry on the
+ // diagonal
+ if (column_space_map->getGlobalNumElements() ==
+ graph->getRangeMap()->getGlobalNumElements())
+ column = row;
+ // if not, take a column index that we have ourselves since we
+ // know for sure it is there (and it will not create spurious
+ // messages to many ranks like putting index 0 on many processors)
+ else if (column_space_map->getLocalNumElements() > 0)
+ column = column_space_map->getGlobalElement(0);
+ nonlocal_graph->insertGlobalIndices(row, 1, &column);
+ }
+ Assert(nonlocal_graph->getRowMap()->getLocalNumElements() == 0 ||
+ nonlocal_graph->isGloballyIndexed(),
+ ExcInternalError());
+
+ nonlocal_graph->fillComplete(column_space_map, graph->getRangeMap());
+ graph->fillComplete(column_space_map, graph->getRangeMap());
+ }
+ else
+ {
+ graph->globalAssemble();
+ }
+
+ // Check consistency between the sizes set at the beginning and what
+ // Trilinos stores:
+ using namespace deal_II_exceptions::internals;
+ Assert(compare_for_equality(n_rows(), graph->getGlobalNumRows()),
+ ExcInternalError());
+ Assert(compare_for_equality(n_cols(),
+ column_space_map->getGlobalNumElements()),
+ ExcInternalError());
+ }
+
+
+
+ template <typename NodeType>
+ bool
+ SparsityPattern<NodeType>::row_is_stored_locally(const size_type i) const
+ {
+ return graph->getRowMap()->getLocalElement(i) !=
+ Teuchos::OrdinalTraits<int>::invalid();
+ }
+
+
+
+ template <typename NodeType>
+ bool
+ SparsityPattern<NodeType>::exists(const size_type i,
+ const size_type j) const
+ {
+ if (!row_is_stored_locally(i))
+ return false;
+
+ // Extract local indices in the matrix.
+ const auto trilinos_i = graph->getRowMap()->getLocalElement(i);
+ const auto trilinos_j = graph->getColMap()->getLocalElement(j);
+
+ typename GraphType::local_inds_host_view_type col_indices;
+
+ // Generate the view.
+ graph->getLocalRowView(trilinos_i, col_indices);
+
+ // Search the index
+ const size_type local_col_index =
+ std::find(col_indices.data(),
+ col_indices.data() + col_indices.size(),
+ trilinos_j) -
+ col_indices.data();
+
+ return static_cast<size_t>(local_col_index) != col_indices.size();
+ }
+
+
+
+ template <typename NodeType>
+ typename SparsityPattern<NodeType>::size_type
+ SparsityPattern<NodeType>::bandwidth() const
+ {
+ size_type local_b = 0;
+ for (int i = 0; i < static_cast<int>(local_size()); ++i)
+ {
+ typename GraphType::local_inds_host_view_type indices;
+ graph->getLocalRowView(i, indices);
+ const auto num_entries = indices.size();
+ for (unsigned int j = 0; j < static_cast<unsigned int>(num_entries);
+ ++j)
+ {
+ if (static_cast<size_type>(std::abs(i - indices[j])) > local_b)
+ local_b = std::abs(i - indices[j]);
+ }
+ }
+
+ TrilinosWrappers::types::int_type global_b =
+ Utilities::MPI::max(local_b,
+ Utilities::Trilinos::teuchos_comm_to_mpi_comm(
+ graph->getComm()));
+ return static_cast<size_type>(global_b);
+ }
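+
+ // As an illustration of the quantity computed above: the
+ // bandwidth is the maximum |i - j| over all allocated entries
+ // (i,j), so a purely diagonal pattern has bandwidth 0 and a
+ // tridiagonal one has bandwidth 1.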
+
+
+
+ template <typename NodeType>
+ unsigned int
+ SparsityPattern<NodeType>::local_size() const
+ {
+ return graph->getLocalNumRows();
+ }
+
+
+
+ template <typename NodeType>
+ std::pair<typename SparsityPattern<NodeType>::size_type,
+ typename SparsityPattern<NodeType>::size_type>
+ SparsityPattern<NodeType>::local_range() const
+ {
+ const size_type begin = graph->getRowMap()->getMinGlobalIndex();
+ const size_type end = graph->getRowMap()->getMaxGlobalIndex() + 1;
+
+ return {begin, end};
+ }
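+
+ // For example, with 10 rows split contiguously across two ranks
+ // as {0,...,4} and {5,...,9}, local_range() returns the half-open
+ // interval {0, 5} on rank 0 and {5, 10} on rank 1.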
+
+
+
+ template <typename NodeType>
+ std::uint64_t
+ SparsityPattern<NodeType>::n_nonzero_elements() const
+ {
+ return graph->getGlobalNumEntries();
+ }
+
+
+
+ template <typename NodeType>
+ unsigned int
+ SparsityPattern<NodeType>::max_entries_per_row() const
+ {
+ return graph->getLocalMaxNumRowEntries();
+ }
+
+
+
+ template <typename NodeType>
+ typename SparsityPattern<NodeType>::size_type
+ SparsityPattern<NodeType>::row_length(const size_type row) const
+ {
+ Assert(row < static_cast<size_type>(n_rows()), ExcInternalError());
+
+ // Find out where the present row is located on the current
+ // processor.
+ TrilinosWrappers::types::int_type local_row =
+ graph->getRowMap()->getLocalElement(row);
+
+ // On the processor that owns this row, we will get a
+ // non-negative value for `local_row` and can ask for the length
+ // of the row.
+ if (local_row >= 0)
+ return graph->getNumEntriesInLocalRow(local_row);
+ else
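+ // The row is not stored locally: return the sentinel
+ // static_cast<size_type>(-1), i.e. the largest value
+ // representable in size_type.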
+ return static_cast<size_type>(-1);
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::add_row_entries(
+ const dealii::types::global_dof_index &row,
+ const ArrayView<const dealii::types::global_dof_index> &columns,
+ const bool indices_are_sorted)
+ {
+ add_entries(row, columns.begin(), columns.end(), indices_are_sorted);
+ }
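+
+ // add_row_entries() is the virtual interface inherited from
+ // dealii::SparsityPatternBase through which generic deal.II
+ // tools fill the pattern one row at a time. A sketch (the
+ // vector `cols` is made up for illustration):
+ //
+ // @code
+ // std::vector<dealii::types::global_dof_index> cols = {1, 4, 7};
+ // pattern.add_row_entries(0, make_array_view(cols),
+ // /*indices_are_sorted=*/true);
+ // @endcode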
+
+
+
+ template <typename NodeType>
+ Teuchos::RCP<const MapType<NodeType>>
+ SparsityPattern<NodeType>::domain_partitioner() const
+ {
+ return graph->getDomainMap();
+ }
+
+
+
+ template <typename NodeType>
+ Teuchos::RCP<const MapType<NodeType>>
+ SparsityPattern<NodeType>::range_partitioner() const
+ {
+ return graph->getRangeMap();
+ }
+
+
+
+ template <typename NodeType>
+ MPI_Comm
+ SparsityPattern<NodeType>::get_mpi_communicator() const
+ {
+ return Utilities::Trilinos::teuchos_comm_to_mpi_comm(
+ graph->getRangeMap()->getComm());
+ }
+
+
+
+ template <typename NodeType>
+ Teuchos::RCP<const Teuchos::Comm<int>>
+ SparsityPattern<NodeType>::get_teuchos_mpi_communicator() const
+ {
+ return graph->getRangeMap()->getComm();
+ }
+
+
+
+ // As of now, no particularly neat output is generated in the
+ // case of multiple processors.
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::print(
+ std::ostream &out,
+ const bool write_extended_trilinos_info) const
+ {
+ if (write_extended_trilinos_info)
+ out << *graph;
+ else
+ {
+ for (unsigned int i = 0; i < graph->getLocalNumRows(); ++i)
+ {
+ typename GraphType::local_inds_host_view_type indices;
+ graph->getLocalRowView(i, indices);
+ const int num_entries = static_cast<int>(indices.size());
+ for (int j = 0; j < num_entries; ++j)
+ out << "(" << graph->getRowMap()->getGlobalElement(i) << ","
+ << graph->getColMap()->getGlobalElement(indices[j]) << ") "
+ << std::endl;
+ }
+ }
+
+ AssertThrow(out.fail() == false, ExcIO());
+ }
+
+
+
+ template <typename NodeType>
+ void
+ SparsityPattern<NodeType>::print_gnuplot(std::ostream &out) const
+ {
+ Assert(graph->isFillComplete() == true, ExcInternalError());
+ for (dealii::types::signed_global_dof_index row = 0;
+ row < static_cast<dealii::types::signed_global_dof_index>(
+ local_size());
+ ++row)
+ {
+ typename GraphType::local_inds_host_view_type indices;
+ graph->getLocalRowView(row, indices);
+ const int num_entries = static_cast<int>(indices.size());
+
+ Assert(num_entries >= 0, ExcInternalError());
+ // avoid sign comparison warning
+ const dealii::types::signed_global_dof_index num_entries_ =
+ num_entries;
+ for (dealii::types::signed_global_dof_index j = 0; j < num_entries_;
+ ++j)
+ // While matrix entries are usually written as (i,j), with i
+ // vertical and j horizontal, gnuplot expects its input as x-y
+ // pairs; we therefore have to exchange the order of output.
+ out << static_cast<int>(
+ graph->getColMap()->getGlobalElement(indices[j]))
+ << " "
+ << -static_cast<int>(graph->getRowMap()->getGlobalElement(row))
+ << std::endl;
+ }
+
+ AssertThrow(out.fail() == false, ExcIO());
+ }
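+
+ // The output written above can be visualized directly with
+ // gnuplot, e.g. via `plot "pattern.gpl"`, where "pattern.gpl"
+ // stands for whatever file the stream `out` was attached to.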
+
+ // TODO: Implement!
+ template <typename NodeType>
+ std::size_t
+ SparsityPattern<NodeType>::memory_consumption() const
+ {
+ Assert(false, ExcNotImplemented());
+ return 0;
+ }
+
+
+# ifndef DOXYGEN
+ // explicit instantiations
+ template class SparsityPattern<
+ Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>;
+
+ template void
+ SparsityPattern<Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>::
+ copy_from(const dealii::SparsityPattern &);
+ template void
+ SparsityPattern<Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>::
+ copy_from(const dealii::DynamicSparsityPattern &);
+
+ template void
+ SparsityPattern<Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>::
+ reinit(const IndexSet &,
+ const dealii::SparsityPattern &,
+ const MPI_Comm,
+ bool);
+ template void
+ SparsityPattern<Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>::
+ reinit(const IndexSet &,
+ const dealii::DynamicSparsityPattern &,
+ const MPI_Comm,
+ bool);
+
+
+ template void
+ SparsityPattern<Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>::
+ reinit(const IndexSet &,
+ const IndexSet &,
+ const dealii::SparsityPattern &,
+ const MPI_Comm,
+ bool);
+ template void
+ SparsityPattern<Tpetra::KokkosClassic::DefaultNode::DefaultNodeType>::
+ reinit(const IndexSet &,
+ const IndexSet &,
+ const dealii::DynamicSparsityPattern &,
+ const MPI_Comm,
+ bool);
+# endif
+
+ } // namespace TpetraWrappers
+
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_TRILINOS_WITH_TPETRA