From: Martin Kronbichler Date: Fri, 13 Nov 2009 15:40:05 +0000 (+0000) Subject: Apply some updates that have been developed in the distributed grid branch: reinit... X-Git-Tag: v8.0.0~6817 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=06a6785c02f279c0d2efc03767ee3740a87659cd;p=dealii.git Apply some updates that have been developed in the distributed grid branch: reinit option with IndexSet for Trilinos vectors and matrices. Rearrangements of header files for Trilinos and PETSc. git-svn-id: https://svn.dealii.org/trunk@20107 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/base/include/base/index_set.h b/deal.II/base/include/base/index_set.h index b310ade67f..e774eac9ac 100644 --- a/deal.II/base/include/base/index_set.h +++ b/deal.II/base/include/base/index_set.h @@ -19,6 +19,16 @@ #include #include +#ifdef DEAL_II_USE_TRILINOS +# include +#endif + +#if defined(DEAL_II_COMPILER_SUPPORTS_MPI) || defined(DEAL_II_USE_PETSC) +#include +#else +typedef int MPI_Comm; +#define MPI_COMM_WORLD 0 +#endif DEAL_II_NAMESPACE_OPEN @@ -150,6 +160,68 @@ class IndexSet */ void compress () const; +#ifdef DEAL_II_USE_TRILINOS + /** + * Given an MPI communicator, + * create a Trilinos map object + * that represents a distribution + * of vector elements or matrix + * rows in which we will locally + * store those elements or rows + * for which we store the index + * in the current index set, and + * all the other elements/rows + * elsewhere on one of the other + * MPI processes. + * + * The last argument only plays a + * role if the communicator is a + * parallel one, distributing + * computations across multiple + * processors. In that case, if + * the last argument is false, + * then it is assumed that the + * index sets this function is + * called on on all processors + * are mutually exclusive but + * together enumerate each index + * exactly once. In other words, + * if you call this function on + * two processors, then the index + * sets this function is called + * with must together have all + * possible indices from zero to + * size()-1, and no index must + * appear in both index + * sets. This corresponds, for + * example, to the case where we + * want to split the elements of + * vectors into unique subsets to + * be stored on different + * processors -- no element + * should be owned by more than + * one processor, but each + * element must be owned by one. + * + * On the other hand, if the + * second argument is true, then + * the index sets can be + * overlapping, though they still + * need to contain each index + * exactly once on all processors + * taken together. This is a + * useful operation if we want to + * create vectors that not only + * contain the locally owned + * indices, but for example also + * the elements that correspond + * to degrees of freedom located + * on ghost cells. + */ + Epetra_Map make_trilinos_map (const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool overlapping = false) const; +#endif + private: /** * A type that denotes the half @@ -332,7 +404,7 @@ IndexSet::add_indices (const ForwardIterator &begin, // range. 
if some of them happen to // be consecutive, merge them to a // range - for (ForwardIterator p=begin; p +inline +QProjector::DataSetDescriptor:: +DataSetDescriptor (const unsigned int dataset_offset) + : + dataset_offset (dataset_offset) +{} + + +template +inline +QProjector::DataSetDescriptor:: +DataSetDescriptor () + : + dataset_offset (numbers::invalid_unsigned_int) +{} + + + +template +typename QProjector::DataSetDescriptor +QProjector::DataSetDescriptor::cell () +{ + return 0; +} + + + +template +inline +QProjector::DataSetDescriptor::operator unsigned int () const +{ + return dataset_offset; +} + + /* -------------- declaration of explicit specializations ------------- */ #ifndef DOXYGEN + template <> void QProjector<1>::project_to_face (const Quadrature<0> &, diff --git a/deal.II/base/include/base/quadrature.h b/deal.II/base/include/base/quadrature.h index 5cd81bd057..fa3829aa8e 100644 --- a/deal.II/base/include/base/quadrature.h +++ b/deal.II/base/include/base/quadrature.h @@ -370,7 +370,7 @@ class QIterated : public Quadrature #ifndef DOXYGEN -// ---------------------------------------------------------------------- +// ------------------- inline and template functions ---------------- template @@ -427,7 +427,6 @@ Quadrature::get_weights () const - /* -------------- declaration of explicit specializations ------------- */ template <> diff --git a/deal.II/base/source/index_set.cc b/deal.II/base/source/index_set.cc index 70e99070b0..593c6ee48f 100644 --- a/deal.II/base/source/index_set.cc +++ b/deal.II/base/source/index_set.cc @@ -11,8 +11,16 @@ // //--------------------------------------------------------------------------- + #include +#ifdef DEAL_II_USE_TRILINOS +# ifdef DEAL_II_COMPILER_SUPPORTS_MPI +# include +# endif +# include +#endif + DEAL_II_NAMESPACE_OPEN void @@ -82,4 +90,50 @@ IndexSet::compress () const } + +#ifdef DEAL_II_USE_TRILINOS + +Epetra_Map +IndexSet::make_trilinos_map (const MPI_Comm &communicator, + const bool overlapping) const +{ + compress (); + + if ((is_contiguous() == true) && (!overlapping)) + return Epetra_Map (size(), + n_elements(), + 0, +#ifdef DEAL_II_COMPILER_SUPPORTS_MPI + Epetra_MpiComm(communicator)); +#else + Epetra_SerialComm()); +#endif + else + { + std::vector indices; + indices.reserve(n_elements()); + for (std::vector::iterator + i = ranges.begin(); + i != ranges.end(); + ++i) + for (unsigned int j=i->begin; jend; ++j) + indices.push_back (j); + Assert (indices.size() == n_elements(), ExcInternalError()); + + return Epetra_Map (-1, + n_elements(), + &indices[0], + 0, +#ifdef DEAL_II_COMPILER_SUPPORTS_MPI + Epetra_MpiComm(communicator)); +#else + Epetra_SerialComm()); +#endif + } +} + + +#endif + + DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/base/source/quadrature.cc b/deal.II/base/source/quadrature.cc index 5e104b9650..afa20c62e6 100644 --- a/deal.II/base/source/quadrature.cc +++ b/deal.II/base/source/quadrature.cc @@ -1045,13 +1045,6 @@ QProjector::project_to_line( } -template -typename QProjector::DataSetDescriptor -QProjector::DataSetDescriptor::cell () -{ - return 0; -} - template typename QProjector::DataSetDescriptor @@ -1414,30 +1407,6 @@ subface (const unsigned int face_no, } -template -QProjector::DataSetDescriptor::operator unsigned int () const -{ - return dataset_offset; -} - - - -template -QProjector::DataSetDescriptor:: -DataSetDescriptor (const unsigned int dataset_offset) - : - dataset_offset (dataset_offset) -{} - - -template -QProjector::DataSetDescriptor:: -DataSetDescriptor () - : - dataset_offset 
(numbers::invalid_unsigned_int) -{} - - template Quadrature diff --git a/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h index 494f46c87c..677fdf5ec0 100644 --- a/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -119,10 +120,17 @@ class CompressedSimpleSparsityPattern : public Subscriptor /** * Initialize a rectangular * matrix with @p m rows and - * @p n columns. + * @p n columns. The @p rowset + * restricts the storage to + * elements in rows of this set. + * Adding elements outside of + * this set has no effect. The + * default argument keeps all + * entries. */ CompressedSimpleSparsityPattern (const unsigned int m, - const unsigned int n); + const unsigned int n, + const IndexSet & rowset = IndexSet()); /** * Initialize a square matrix of @@ -146,10 +154,17 @@ class CompressedSimpleSparsityPattern : public Subscriptor * matrix with @p m rows and * @p n columns, with at most * max_entries_per_row() nonzero - * entries per row. + * entries per row. The @p rowset + * restricts the storage to + * elements in rows of this set. + * Adding elements outside of + * this set has no effect. The + * default argument keeps all + * entries. */ void reinit (const unsigned int m, - const unsigned int n); + const unsigned int n, + const IndexSet & rowset = IndexSet()); /** * Since this object is kept @@ -270,7 +285,12 @@ class CompressedSimpleSparsityPattern : public Subscriptor unsigned int n_cols () const; /** - * Number of entries in a specific row. + * Number of entries in a + * specific row. This function + * can only be called if the + * given row is a member of the + * index set of rows that we want + * to store. */ unsigned int row_length (const unsigned int row) const; @@ -298,8 +318,17 @@ class CompressedSimpleSparsityPattern : public Subscriptor */ unsigned int n_nonzero_elements () const; + /** + * Return the IndexSet that sets which + * rows are active on the current + * processor. It corresponds to the + * IndexSet given to this class in the + * constructor or in the reinit function. + */ + const IndexSet & row_index_set () const; + /** - * Return whether this object stores only + * return whether this object stores only * those entries that have been added * explicitly, or if the sparsity pattern * contains elements that have been added @@ -316,7 +345,12 @@ class CompressedSimpleSparsityPattern : public Subscriptor static bool stores_only_added_elements (); - + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. + */ + unsigned int memory_consumption () const; private: /** @@ -331,6 +365,13 @@ class CompressedSimpleSparsityPattern : public Subscriptor */ unsigned int cols; + /** + * A set that contains the valid rows. + */ + + IndexSet rowset; + + /** * Store some data for each row * describing which entries of this row @@ -369,6 +410,11 @@ class CompressedSimpleSparsityPattern : public Subscriptor void add_entries (ForwardIterator begin, ForwardIterator end, const bool indices_are_sorted); + + /** + * estimates memory consumption. 
+ */ + unsigned int memory_consumption () const; }; @@ -441,7 +487,10 @@ CompressedSimpleSparsityPattern::add (const unsigned int i, Assert (i #include +#include #include #include @@ -33,7 +34,6 @@ DEAL_II_NAMESPACE_OPEN template class Table; -template class Vector; template class FullMatrix; class SparsityPattern; class CompressedSparsityPattern; diff --git a/deal.II/lac/include/lac/petsc_block_sparse_matrix.h b/deal.II/lac/include/lac/petsc_block_sparse_matrix.h index 3e83d7c15f..edb9648a63 100644 --- a/deal.II/lac/include/lac/petsc_block_sparse_matrix.h +++ b/deal.II/lac/include/lac/petsc_block_sparse_matrix.h @@ -15,16 +15,16 @@ #include -#include -#include -#include -#include -#include -#include +#ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include +# include -#ifdef DEAL_II_USE_PETSC +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_block_vector.h b/deal.II/lac/include/lac/petsc_block_vector.h index 6026ac6c7c..c526cbf449 100644 --- a/deal.II/lac/include/lac/petsc_block_vector.h +++ b/deal.II/lac/include/lac/petsc_block_vector.h @@ -15,14 +15,15 @@ #include -#include -#include -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_full_matrix.h b/deal.II/lac/include/lac/petsc_full_matrix.h index ed89cb741c..66dace0b31 100644 --- a/deal.II/lac/include/lac/petsc_full_matrix.h +++ b/deal.II/lac/include/lac/petsc_full_matrix.h @@ -15,11 +15,12 @@ #include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_matrix_base.h b/deal.II/lac/include/lac/petsc_matrix_base.h index 7291d6f84f..2d9e06699f 100644 --- a/deal.II/lac/include/lac/petsc_matrix_base.h +++ b/deal.II/lac/include/lac/petsc_matrix_base.h @@ -15,16 +15,17 @@ #include -#include -#include -#include #ifdef DEAL_II_USE_PETSC -#include -#include -#include -#include +# include +# include +# include + +# include +# include +# include +# include DEAL_II_NAMESPACE_OPEN @@ -664,6 +665,10 @@ namespace PETScWrappers * allowed. This function must * therefore be called once you have * assembled the matrix. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + * more information. */ void compress (); @@ -1107,6 +1112,12 @@ namespace PETScWrappers */ void write_ascii (); + /** + * Returns the number bytes consumed + * by this matrix on this CPU. 
+ */ + unsigned int memory_consumption() const; + /** * Exception */ diff --git a/deal.II/lac/include/lac/petsc_parallel_block_sparse_matrix.h b/deal.II/lac/include/lac/petsc_parallel_block_sparse_matrix.h index 6f6dbc1e4a..f4489cb5b2 100644 --- a/deal.II/lac/include/lac/petsc_parallel_block_sparse_matrix.h +++ b/deal.II/lac/include/lac/petsc_parallel_block_sparse_matrix.h @@ -15,15 +15,16 @@ #include -#include -#include -#include -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_parallel_block_vector.h b/deal.II/lac/include/lac/petsc_parallel_block_vector.h index 393d2caebd..90803f64de 100644 --- a/deal.II/lac/include/lac/petsc_parallel_block_vector.h +++ b/deal.II/lac/include/lac/petsc_parallel_block_vector.h @@ -15,13 +15,14 @@ #include -#include -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h b/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h index c58591aea4..38ead029a8 100644 --- a/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h +++ b/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h @@ -15,14 +15,13 @@ #include -#include - -#include - -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_parallel_vector.h b/deal.II/lac/include/lac/petsc_parallel_vector.h index 440a1b0e77..d05859da3c 100644 --- a/deal.II/lac/include/lac/petsc_parallel_vector.h +++ b/deal.II/lac/include/lac/petsc_parallel_vector.h @@ -13,15 +13,16 @@ #ifndef __deal2__petsc_parallel_vector_h #define __deal2__petsc_parallel_vector_h -#include -#include -#include -#include -#include +#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_precondition.h b/deal.II/lac/include/lac/petsc_precondition.h index 25c8b35f74..f18d7c7628 100644 --- a/deal.II/lac/include/lac/petsc_precondition.h +++ b/deal.II/lac/include/lac/petsc_precondition.h @@ -13,13 +13,13 @@ #ifndef __deal2__petsc_precondition_h #define __deal2__petsc_precondition_h -#include -#include +#include #ifdef DEAL_II_USE_PETSC -#include +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_solver.h b/deal.II/lac/include/lac/petsc_solver.h index 85bce7a21c..0aac5ad434 100644 --- a/deal.II/lac/include/lac/petsc_solver.h +++ b/deal.II/lac/include/lac/petsc_solver.h @@ -13,15 +13,16 @@ #ifndef __deal2__petsc_solver_h #define __deal2__petsc_solver_h -#include -#include -#include -#include +#include #ifdef DEAL_II_USE_PETSC -#include +# include +# include +# include + +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_sparse_matrix.h b/deal.II/lac/include/lac/petsc_sparse_matrix.h index bbf00a6087..fc7b40a25d 100644 --- a/deal.II/lac/include/lac/petsc_sparse_matrix.h +++ b/deal.II/lac/include/lac/petsc_sparse_matrix.h @@ -15,13 +15,13 @@ #include -#include -#include - -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include + DEAL_II_NAMESPACE_OPEN // forward declaration template class BlockMatrixBase; diff --git a/deal.II/lac/include/lac/petsc_vector.h b/deal.II/lac/include/lac/petsc_vector.h index fbe1a67390..166570736d 100644 --- a/deal.II/lac/include/lac/petsc_vector.h +++ b/deal.II/lac/include/lac/petsc_vector.h @@ -13,15 +13,17 @@ 
#ifndef __deal2__petsc_vector_h #define __deal2__petsc_vector_h + #include -#include -#include -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/petsc_vector_base.h b/deal.II/lac/include/lac/petsc_vector_base.h index 85214a92f5..be7d1385ef 100644 --- a/deal.II/lac/include/lac/petsc_vector_base.h +++ b/deal.II/lac/include/lac/petsc_vector_base.h @@ -15,15 +15,16 @@ #include -#include -#include - -#include -#include #ifdef DEAL_II_USE_PETSC -#include +# include +# include + +# include +# include + +# include DEAL_II_NAMESPACE_OPEN @@ -256,6 +257,10 @@ namespace PETScWrappers * is necessary after writing into a * vector element-by-element and before * anything else can be done on it. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + * more information. */ void compress (); diff --git a/deal.II/lac/include/lac/slepc_solver.h b/deal.II/lac/include/lac/slepc_solver.h index da355171f0..8d97aa42bf 100644 --- a/deal.II/lac/include/lac/slepc_solver.h +++ b/deal.II/lac/include/lac/slepc_solver.h @@ -15,16 +15,18 @@ #ifndef __deal2__slepc_solver_h #define __deal2__slepc_solver_h + #include -#include -#include -#include -#include #ifdef DEAL_II_USE_SLEPC -#include -#include +# include +# include +# include +# include + +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/slepc_spectral_transformation.h b/deal.II/lac/include/lac/slepc_spectral_transformation.h index 347165f6fa..41722c635d 100644 --- a/deal.II/lac/include/lac/slepc_spectral_transformation.h +++ b/deal.II/lac/include/lac/slepc_spectral_transformation.h @@ -15,16 +15,18 @@ #ifndef __deal2__slepc_spectral_transformation_h #define __deal2__slepc_spectral_transformation_h + #include -#include -#include -#include -#include #ifdef DEAL_II_USE_SLEPC -#include -#include +# include +# include +# include +# include + +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h b/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h index c03cf2ab77..b0fa7930b8 100644 --- a/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h +++ b/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h @@ -15,17 +15,17 @@ #include -#include -#include -#include -#include -#include -#include -#include +#ifdef DEAL_II_USE_TRILINOS +# include +# include +# include +# include +# include +# include -#ifdef DEAL_II_USE_TRILINOS +# include # define TrilinosScalar double diff --git a/deal.II/lac/include/lac/trilinos_block_vector.h b/deal.II/lac/include/lac/trilinos_block_vector.h index bc65485c11..6836bf2f03 100644 --- a/deal.II/lac/include/lac/trilinos_block_vector.h +++ b/deal.II/lac/include/lac/trilinos_block_vector.h @@ -15,13 +15,14 @@ #include -#include -#include -#include -#include #ifdef DEAL_II_USE_TRILINOS +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN // forward declaration diff --git a/deal.II/lac/include/lac/trilinos_precondition.h b/deal.II/lac/include/lac/trilinos_precondition.h index 9a5d36e95d..51d7daed85 100644 --- a/deal.II/lac/include/lac/trilinos_precondition.h +++ b/deal.II/lac/include/lac/trilinos_precondition.h @@ -15,23 +15,24 @@ #include -#include -#include - -#include #ifdef DEAL_II_USE_TRILINOS -#ifdef DEAL_II_COMPILER_SUPPORTS_MPI -# include -#else -# include -#endif -#include +# include +# include + +# include + +# ifdef DEAL_II_COMPILER_SUPPORTS_MPI +# include +# else +# include 
+# endif +# include -#include -#include -#include +# include +# include +# include // forward declarations class Ifpack_Preconditioner; diff --git a/deal.II/lac/include/lac/trilinos_precondition_block.h b/deal.II/lac/include/lac/trilinos_precondition_block.h index 69d40311c1..b3dc161707 100644 --- a/deal.II/lac/include/lac/trilinos_precondition_block.h +++ b/deal.II/lac/include/lac/trilinos_precondition_block.h @@ -15,16 +15,16 @@ #include -#include -#include -#include - #ifdef DEAL_II_USE_TRILINOS -#include -#include -#include +# include +# include +# include + +# include +# include +# include // some forward declarations class Ifpack_Preconditioner; diff --git a/deal.II/lac/include/lac/trilinos_solver.h b/deal.II/lac/include/lac/trilinos_solver.h index 7863026a4d..59bf20bb30 100644 --- a/deal.II/lac/include/lac/trilinos_solver.h +++ b/deal.II/lac/include/lac/trilinos_solver.h @@ -13,17 +13,19 @@ #ifndef __deal2__trilinos_solver_h #define __deal2__trilinos_solver_h + #include -#include -#include -#include #ifdef DEAL_II_USE_TRILINOS -#include -#include -#include -#include +# include +# include +# include + +# include +# include +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/trilinos_solver_block.h b/deal.II/lac/include/lac/trilinos_solver_block.h index 8cd2d6d9c5..76cd35ec9a 100644 --- a/deal.II/lac/include/lac/trilinos_solver_block.h +++ b/deal.II/lac/include/lac/trilinos_solver_block.h @@ -13,12 +13,13 @@ #ifndef __deal2__trilinos_block_solver_h #define __deal2__trilinos_block_solver_h + #include -#include -#include #ifdef DEAL_II_USE_TRILINOS +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/include/lac/trilinos_sparse_matrix.h b/deal.II/lac/include/lac/trilinos_sparse_matrix.h index a09d7b98f0..0e53779ccb 100644 --- a/deal.II/lac/include/lac/trilinos_sparse_matrix.h +++ b/deal.II/lac/include/lac/trilinos_sparse_matrix.h @@ -15,19 +15,18 @@ #include -#include -#include -#include -#include -#include -#include -#include - -#include +#ifdef DEAL_II_USE_TRILINOS +# include +# include +# include +# include +# include -#ifdef DEAL_II_USE_TRILINOS +# include +# include +# include # define TrilinosScalar double # include @@ -126,7 +125,7 @@ namespace TrilinosWrappers << " of a distributed matrix, but only rows " << arg2 << " through " << arg3 << " are stored locally and can be accessed."); - + private: /** * The matrix accessed. @@ -175,7 +174,7 @@ namespace TrilinosWrappers * of this row. */ std_cxx1x::shared_ptr > value_cache; - + /** * Discard the old row caches * (they may still be used by @@ -192,19 +191,19 @@ namespace TrilinosWrappers */ friend class const_iterator; }; - + public: - + /** * Constructor. Create an * iterator into the matrix @p * matrix for the given row and * the index within it. - */ + */ const_iterator (const SparseMatrix *matrix, const unsigned int row, const unsigned int index); - + /** * Prefix increment. */ @@ -255,7 +254,7 @@ namespace TrilinosWrappers << "Attempt to access element " << arg2 << " of row " << arg1 << " which doesn't have that many elements."); - + private: /** * Store an object of the @@ -263,10 +262,10 @@ namespace TrilinosWrappers */ Accessor accessor; }; - + } - - + + /** * This class implements a wrapper to use the Trilinos distributed * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of @@ -292,7 +291,7 @@ namespace TrilinosWrappers * that compresses the storage format for sparse matrices by discarding * unused elements. 
Trilinos allows to continue with assembling the * matrix after calls to these functions, though. - * + * * @ingroup TrilinosWrappers * @ingroup Matrix1 * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 @@ -334,7 +333,7 @@ namespace TrilinosWrappers * classes. */ typedef TrilinosScalar value_type; - + /** * @name Constructors and initalization. */ @@ -346,43 +345,279 @@ namespace TrilinosWrappers SparseMatrix (); /** - * Constructor using an Epetra_Map - * and a maximum number of nonzero - * matrix entries. Note that this - * number does not need to be exact, - * and it is even allowed that the - * actual matrix structure has more - * nonzero entries than specified in - * the constructor. However it is - * still advantageous to provide good + * Generate a matrix that is completely + * stored locally, having #m rows and + * #n columns. + * + * The number of columns entries per + * row is specified as the maximum + * number of entries argument. + */ + SparseMatrix (const unsigned int m, + const unsigned int n, + const unsigned int n_max_entries_per_row); + + /** + * Generate a matrix that is completely + * stored locally, having #m rows and + * #n columns. + * + * The vector + * n_entries_per_row + * specifies the number of entries in + * each row. + */ + SparseMatrix (const unsigned int m, + const unsigned int n, + const std::vector &n_entries_per_row); + + /** + * Generate a matrix from a Trilinos + * sparsity pattern object. + */ + SparseMatrix (const SparsityPattern &InputSparsityPattern); + + /** + * Copy constructor. Sets the + * calling matrix to be the same + * as the input matrix, i.e., + * using the same sparsity + * pattern and entries. + */ + SparseMatrix (const SparseMatrix &InputMatrix); + + /** + * Destructor. Made virtual so + * that one can use pointers to + * this class. + */ + virtual ~SparseMatrix (); + + /** + * This function initializes the + * Trilinos matrix with a deal.II + * sparsity pattern, i.e. it makes + * the Trilinos Epetra matrix know + * the position of nonzero entries + * according to the sparsity + * pattern. This function is meant + * for use in serial programs, where + * there is no need to specify how + * the matrix is going to be + * distributed among different + * processors. This function works in + * parallel, too, but it is + * recommended to manually specify + * the %parallel partioning of the + * matrix using an Epetra_Map. When + * run in parallel, it is currently + * necessary that each processor + * holds the sparsity_pattern + * structure because each processor + * sets its rows. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const SparsityType &sparsity_pattern); + + /** + * This function reinitializes the + * Trilinos sparse matrix from a + * (possibly distributed) Trilinos + * sparsity pattern. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void reinit (const SparsityPattern &sparsity_pattern); + + /** + * This function copies the content + * in sparse_matrix to the + * calling matrix. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void reinit (const SparseMatrix &sparse_matrix); + + /** + * This function initializes the + * Trilinos matrix using the deal.II + * sparse matrix and the entries + * stored therein. 
It uses a + * threshold to copy only elements + * with modulus larger than the + * threshold (so zeros in the deal.II + * matrix can be filtered away). + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const ::dealii::SparseMatrix &dealii_sparse_matrix, + const double drop_tolerance=1e-13, + const bool copy_values=true); + + /** + * This reinit function takes as + * input a Trilinos Epetra_CrsMatrix + * and copies its sparsity + * pattern. If so requested, even the + * content (values) will be copied. + */ + void reinit (const Epetra_CrsMatrix &input_matrix, + const bool copy_values = true); + + /** + * This operator assigns a scalar to + * a matrix. Since this does usually + * not make much sense (should we set + * all matrix entries to this value? + * Only the nonzero entries of the + * sparsity pattern?), this operation + * is only allowed if the actual + * value to be assigned is zero. This + * operator only exists to allow for + * the obvious notation + * matrix=0, which sets all + * elements of the matrix to zero, + * but keeps the sparsity pattern + * previously used. + */ + SparseMatrix & + operator = (const double d); + + /** + * Release all memory and return to a + * state just like after having + * called the default constructor. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void clear (); + + /** + * This command does two things: + *
    + *
  • If the matrix was initialized + * without a sparsity pattern, + * elements have been added manually + * using the set() command. When this + * process is completed, a call to + * compress() reorganizes the + * internal data structures (sparsity + * pattern) so that fast access to + * data is possible in matrix-vector + * products. + *
  • If the matrix structure has + * already been fixed (either by + * initialization with a sparsity + * pattern or by calling compress() + * during the setup phase), this + * command does the %parallel + * exchange of data. This is + * necessary when we perform assembly + * on more than one (MPI) process, + * because then some non-local row + * data will accumulate on nodes that + * belong to the current processor's + * elements, but are actually held by + * another processor. This command is + * usually called after all elements + * have been traversed.
+ * + * In both cases, this function + * compresses the data structures and + * allows the resulting matrix to be + * used in all other operations like + * matrix-vector products. This is a + * collective operation, i.e., it + * needs to be run on all processors + * when used in parallel. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + */ + void compress (); + + /** + * Returns the state of the matrix, + * i.e., whether compress() needs to + * be called after an operation + * requiring data exchange. A call to + * compress() is also needed when the + * method set() has been called (even + * when working in serial). + */ + bool is_compressed () const; +//@} +/** + * @name Constructors and initialization using an Epetra_Map description + */ +//@{ + /** + * Constructor using an Epetra_Map to + * describe the %parallel + * partitioning. The parameter @p + * n_max_entries_per_row sets the + * number of nonzero entries in each + * row that will be allocated. Note + * that this number does not need to + * be exact, and it is even allowed + * that the actual matrix structure + * has more nonzero entries than + * specified in the + * constructor. However it is still + * advantageous to provide good * estimates here since this will * considerably increase the * performance of the matrix - * setup. However, there should be no - * effect in the performance of + * setup. However, there is no effect + * in the performance of * matrix-vector products, since - * Trilinos wants to reorganize the - * matrix memory prior to use. + * Trilinos reorganizes the matrix + * memory prior to use (in the + * compress() step). */ - SparseMatrix (const Epetra_Map &InputMap, - const unsigned int n_max_entries_per_row); - - /** - * Same as before, but now use - * the exact number of nonzeros - * in each matrix row. Since we - * know the number of elements - * in the matrix exactly in - * this case, we can already - * allocate the right amount of - * memory, which makes the - * creation process including - * the insertion of nonzero + SparseMatrix (const Epetra_Map ¶llel_partitioning, + const unsigned int n_max_entries_per_row = 0); + + /** + * Same as before, but now set a + * value of nonzeros for each matrix + * row. Since we know the number of + * elements in the matrix exactly in + * this case, we can already allocate + * the right amount of memory, which + * makes the creation process + * including the insertion of nonzero * elements by the respective * SparseMatrix::reinit call * considerably faster. */ - SparseMatrix (const Epetra_Map &InputMap, + SparseMatrix (const Epetra_Map ¶llel_partitioning, const std::vector &n_entries_per_row); /** @@ -409,14 +644,14 @@ namespace TrilinosWrappers * doing matrix-vector products with * vectors based on that column map. * - * The number of columns entries - * per row is specified as the - * maximum number of entries - * argument. + * The integer input @p + * n_max_entries_per_row defines the + * number of columns entries per row + * that will be allocated. 
*/ - SparseMatrix (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, - const unsigned int n_max_entries_per_row); + SparseMatrix (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, + const unsigned int n_max_entries_per_row = 0); /** * This constructor is similar to the @@ -429,7 +664,7 @@ namespace TrilinosWrappers * distribution of degrees of freedom * associated with matrix rows and * the second one specifies the - * parallel distribution the dofs + * %parallel distribution the dofs * associated with columns in the * matrix. The second map also * provides information for the @@ -447,89 +682,256 @@ namespace TrilinosWrappers * each row of the newly generated * matrix. */ - SparseMatrix (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + SparseMatrix (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, const std::vector &n_entries_per_row); - /** - * Generate a matrix that is completely - * stored locally, having #m rows and - * #n columns. + /** + * This function is initializes the + * Trilinos Epetra matrix according to + * the specified sparsity_pattern, and + * also reassigns the matrix rows to + * different processes according to a + * user-supplied Epetra map. In + * programs following the style of the + * tutorial programs, this function + * (and the respective call for a + * rectangular matrix) are the natural + * way to initialize the matrix size, + * its distribution among the MPI + * processes (if run in parallel) as + * well as the locatoin of non-zero + * elements. Trilinos stores the + * sparsity pattern internally, so it + * won't be needed any more after this + * call, in contrast to the deal.II own + * object. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. If + * the flag is not set, each processor + * just sets the elements in the + * sparsity pattern that belong to its + * rows. * - * The number of columns entries per - * row is specified as the maximum - * number of entries argument. + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. */ - SparseMatrix (const unsigned int m, - const unsigned int n, - const unsigned int n_max_entries_per_row); + template + void reinit (const Epetra_Map ¶llel_partitioning, + const SparsityType &sparsity_pattern, + const bool exchange_data = false); - /** - * Generate a matrix that is completely - * stored locally, having #m rows and - * #n columns. + /** + * This function is similar to the + * other initialization function + * above, but now also reassigns the + * matrix rows and columns according + * to two user-supplied Epetra maps. + * To be used for rectangular + * matrices. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row. - */ - SparseMatrix (const unsigned int m, - const unsigned int n, - const std::vector &n_entries_per_row); - - /** - * Generate a matrix from a Trilinos - * sparsity pattern object. 
+ * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. */ - SparseMatrix (const SparsityPattern &InputSparsityPattern); + template + void reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, + const SparsityType &sparsity_pattern, + const bool exchange_data = false); - /** - * Copy constructor. Sets the - * calling matrix to be the same - * as the input matrix, i.e., - * using the same sparsity - * pattern and entries. + /** + * This function initializes the + * Trilinos matrix using the deal.II + * sparse matrix and the entries + * stored therein. It uses a + * threshold to copy only elements + * with modulus larger than the + * threshold (so zeros in the deal.II + * matrix can be filtered away). In + * contrast to the other reinit + * function with deal.II sparse + * matrix argument, this function + * takes a %parallel partitioning + * specified by the user instead of + * internally generating one. + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. */ - SparseMatrix (const SparseMatrix &InputMatrix); + template + void reinit (const Epetra_Map ¶llel_partitioning, + const ::dealii::SparseMatrix &dealii_sparse_matrix, + const double drop_tolerance=1e-13, + const bool copy_values=true); - /** - * Destructor. Made virtual so - * that one can use pointers to - * this class. + /** + * This function is similar to the + * other initialization function with + * deal.II sparse matrix input above, + * but now takes Epetra maps for both + * the rows and the columns of the + * matrix. Chosen for rectangular + * matrices. + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. */ - virtual ~SparseMatrix (); - - /** - * This function initializes the - * Trilinos matrix with a deal.II - * sparsity pattern, i.e. it makes - * the Trilinos Epetra matrix know - * the position of nonzero entries - * according to the sparsity - * pattern. This function is meant - * for use in serial programs, where - * there is no need to specify how - * the matrix is going to be - * distributed among different - * processors. This function works in - * parallel, too, but it is - * recommended to manually specify - * the parallel partioning of the - * matrix using an Epetra_Map. When - * run in parallel, it is currently - * necessary that each processor - * holds the sparsity_pattern - * structure because each processor - * sets its rows. + template + void reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, + const ::dealii::SparseMatrix &dealii_sparse_matrix, + const double drop_tolerance=1e-13, + const bool copy_values=true); +//@} +/** + * @name Constructors and initialization using an IndexSet description + */ +//@{ + /** + * Constructor using an IndexSet and + * an MPI communicator to describe + * the %parallel partitioning. The + * parameter @p n_max_entries_per_row + * sets the number of nonzero entries + * in each row that will be + * allocated. 
Note that this number + * does not need to be exact, and it + * is even allowed that the actual + * matrix structure has more nonzero + * entries than specified in the + * constructor. However it is still + * advantageous to provide good + * estimates here since this will + * considerably increase the + * performance of the matrix + * setup. However, there is no effect + * in the performance of + * matrix-vector products, since + * Trilinos reorganizes the matrix + * memory prior to use (in the + * compress() step). + */ + SparseMatrix (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_max_entries_per_row = 0); + + /** + * Same as before, but now set the + * number of nonzeros in each matrix + * row separately. Since we know the + * number of elements in the matrix + * exactly in this case, we can + * already allocate the right amount + * of memory, which makes the + * creation process including the + * insertion of nonzero elements by + * the respective + * SparseMatrix::reinit call + * considerably faster. + */ + SparseMatrix (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); + + /** + * This constructor is similar to the + * one above, but it now takes two + * different IndexSet partitions for + * row and columns. This interface is + * meant to be used for generating + * rectangular matrices, where the + * first index set describes the + * %parallel partitioning of the + * degrees of freedom associated with + * the matrix rows and the second one + * the partitioning of the matrix + * columns. The second index set + * specifies the partitioning of the + * vectors this matrix is to be + * multiplied with, not the + * distribution of the elements that + * actually appear in the matrix. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * The parameter @p + * n_max_entries_per_row defines how + * much memory will be allocated for + * each row. This number does not + * need to be accurate, as the + * structure is reorganized in the + * compress() call. */ - template - void reinit (const SparsityType &sparsity_pattern); + SparseMatrix (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_max_entries_per_row = 0); + + /** + * This constructor is similar to the + * one above, but it now takes two + * different Epetra maps for rows and + * columns. This interface is meant + * to be used for generating + * rectangular matrices, where one + * map specifies the parallel + * distribution of degrees of freedom + * associated with matrix rows and + * the second one specifies the + * %parallel distribution the dofs + * associated with columns in the + * matrix. The second map also + * provides information for the + * internal arrangement in matrix + * vector products (i.e., the + * distribution of vector this matrix + * is to be multiplied with), but is + * not used for the distribution of + * the columns – rather, all + * column elements of a row are + * stored on the same processor in + * any case. The vector + * n_entries_per_row + * specifies the number of entries in + * each row of the newly generated + * matrix. 
+ */ + SparseMatrix (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); /** * This function is initializes the @@ -537,7 +939,8 @@ namespace TrilinosWrappers * to the specified sparsity_pattern, * and also reassigns the matrix rows * to different processes according - * to a user-supplied Epetra map. In + * to a user-supplied index set and + * %parallel communicator. In * programs following the style of * the tutorial programs, this * function (and the respective call @@ -551,11 +954,18 @@ namespace TrilinosWrappers * internally, so it won't be needed * any more after this call, in * contrast to the deal.II own - * object. In a parallel run, it is - * currently necessary that each - * processor holds the - * sparsity_pattern structure because - * each processor sets its rows. + * object. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. If + * the flag is not set, each + * processor just sets the elements + * in the sparsity pattern that + * belong to its rows. * * This is a collective operation * that needs to be called on all @@ -563,17 +973,26 @@ namespace TrilinosWrappers * dead lock. */ template - void reinit (const Epetra_Map &input_map, - const SparsityType &sparsity_pattern); + void reinit (const IndexSet ¶llel_partitioning, + const SparsityType &sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); /** * This function is similar to the * other initialization function * above, but now also reassigns the * matrix rows and columns according - * to two user-supplied Epetra maps. + * to two user-supplied index sets. * To be used for rectangular - * matrices. + * matrices. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. * * This is a collective operation * that needs to be called on all @@ -581,61 +1000,11 @@ namespace TrilinosWrappers * dead lock. */ template - void reinit (const Epetra_Map &input_row_map, - const Epetra_Map &input_col_map, - const SparsityType &sparsity_pattern); - - /** - * This function reinitializes the - * Trilinos sparse matrix from a - * (possibly distributed) Trilinos - * sparsity pattern. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - void reinit (const SparsityPattern &sparsity_pattern); - - /** - * This function copies the content - * in sparse_matrix to the - * calling matrix. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - void reinit (const SparseMatrix &sparse_matrix); - - /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). 
- * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix - * entries should be copied, too. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const ::dealii::SparseMatrix &dealii_sparse_matrix, - const double drop_tolerance=1e-13, - const bool copy_values=true); + void reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityType &sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); /** * This function initializes the @@ -649,7 +1018,7 @@ namespace TrilinosWrappers * contrast to the other reinit * function with deal.II sparse * matrix argument, this function - * takes a parallel partitioning + * takes a %parallel partitioning * specified by the user instead of * internally generating one. * @@ -666,8 +1035,9 @@ namespace TrilinosWrappers * dead lock. */ template - void reinit (const Epetra_Map &input_map, + void reinit (const IndexSet ¶llel_partitioning, const ::dealii::SparseMatrix &dealii_sparse_matrix, + const MPI_Comm &communicator = MPI_COMM_WORLD, const double drop_tolerance=1e-13, const bool copy_values=true); @@ -675,7 +1045,7 @@ namespace TrilinosWrappers * This function is similar to the * other initialization function with * deal.II sparse matrix input above, - * but now takes Epetra maps for both + * but now takes index sets for both * the rows and the columns of the * matrix. Chosen for rectangular * matrices. @@ -693,87 +1063,18 @@ namespace TrilinosWrappers * dead lock. */ template - void reinit (const Epetra_Map &input_row_map, - const Epetra_Map &input_col_map, + void reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, const ::dealii::SparseMatrix &dealii_sparse_matrix, + const MPI_Comm &communicator = MPI_COMM_WORLD, const double drop_tolerance=1e-13, const bool copy_values=true); - - /** - * This reinit function takes as - * input a Trilinos Epetra_CrsMatrix - * and copies its sparsity - * pattern. If so requested, even the - * content (values) will be copied. - */ - void reinit (const Epetra_CrsMatrix &input_matrix, - const bool copy_values = true); - - /** - * This operator assigns a scalar to - * a matrix. Since this does usually - * not make much sense (should we set - * all matrix entries to this value? - * Only the nonzero entries of the - * sparsity pattern?), this operation - * is only allowed if the actual - * value to be assigned is zero. This - * operator only exists to allow for - * the obvious notation - * matrix=0, which sets all - * elements of the matrix to zero, - * but keeps the sparsity pattern - * previously used. - */ - SparseMatrix & - operator = (const double d); - - /** - * Release all memory and return to a - * state just like after having - * called the default constructor. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - void clear (); - - /** - * Trilinos matrices store their own - * sparsity patterns. So, in analogy - * to our own SparsityPattern class, - * this function compresses the - * sparsity pattern and allows the - * resulting matrix to be used in all - * other operations where before only - * assembly functions were - * allowed. This function must - * therefore be called once you have - * assembled the matrix. 
This is a - * collective operation, i.e., it - * needs to be run on all processors - * when used in parallel. - */ - void compress (); - - /** - * Returns the state of the matrix, - * i.e., whether compress() needs to - * be called after an operation - * requiring data exchange. A call to - * compress() is also needed when the - * method set() has been called (even - * when working in serial). - */ - bool is_compressed () const; //@} /** * @name Information on the matrix */ //@{ - + /** * Return the number of rows in * this matrix. @@ -793,7 +1094,7 @@ namespace TrilinosWrappers * present MPI process. For * sequential matrices, this * number is the same as m(), - * but for parallel matrices it + * but for %parallel matrices it * may be smaller. * * To figure out which elements @@ -1147,7 +1448,7 @@ namespace TrilinosWrappers * by a fixed factor. */ SparseMatrix & operator *= (const TrilinosScalar factor); - + /** * Divide the entire matrix by * a fixed factor. @@ -1207,7 +1508,7 @@ namespace TrilinosWrappers * elements) from it — * without this operation, * removing constraints on - * parallel matrices is a + * %parallel matrices is a * rather complicated * procedure. * @@ -1632,14 +1933,14 @@ namespace TrilinosWrappers * @name Matrix norms */ //@{ - + /** * Return the * l1-norm of * the matrix, that is * $|M|_1= * \max_{\mathrm{all\ columns\ } j} - * \sum_{\mathrm{all\ rows\ } i} + * \sum_{\mathrm{all\ rows\ } i} * |M_{ij}|$, (max. sum * of columns). This is the * natural matrix norm that is @@ -1654,8 +1955,8 @@ namespace TrilinosWrappers /** * Return the linfty-norm of the * matrix, that is - * $|M|_\infty=\max_{\mathrm{all\ - * rows\ } i}\sum_{\mathrm{all\ + * $|M|_\infty=\max_{\mathrm{all\ + * rows\ } i}\sum_{\mathrm{all\ * columns\ } j} |M_{ij}|$, * (max. sum of rows). This is * the natural matrix norm that @@ -1805,21 +2106,27 @@ namespace TrilinosWrappers * implemented. */ void write_ascii (); - + /** - * Print the matrix to the - * given stream, using the - * format (line,col) - * value, i.e. one nonzero - * entry of the matrix per - * line. + * Print the matrix to the given + * stream, using the format + * (line,col) value, + * i.e. one nonzero entry of the + * matrix per line. The optional flag + * outputs the sparsity pattern in + * Trilinos style, where even the + * according processor number is + * printed to the stream, as well as + * a summary before actually writing + * the entries. */ - void print (std::ostream &out) const; - + void print (std::ostream &out, + const bool write_extended_trilinos_info = false) const; + // TODO: Write an overloading // of the operator << for output. - // Since the underlying Trilinos - // object supports it, this should + // Since the underlying Trilinos + // object supports it, this should // be very easy. //@} @@ -1846,6 +2153,11 @@ namespace TrilinosWrappers */ DeclException0 (ExcSourceEqualsDestination); + /** + * Exception + */ + DeclException0 (ExcMatrixNotCompressed); + /** * Exception */ @@ -1867,10 +2179,10 @@ namespace TrilinosWrappers << " of a sparse matrix, but it appears to not" << " exist in the Trilinos sparsity pattern."); //@} - - protected: + + protected: /** * For some matrix storage @@ -1906,31 +2218,8 @@ namespace TrilinosWrappers void prepare_set(); - - private: - /** - * A pointer to the communicator used - * for all operations in this object. - * - * Note that we create a new - * communicator (with a unique MPI ID) - * for each object if we are running in - * parallel. 
- */ - boost::scoped_ptr communicator; - - /** - * Epetra Trilinos - * mapping of the matrix rows - * that assigns parts of the - * matrix to the individual - * processes. This map is - * provided either via the - * constructor or in a reinit - * function. - */ - Epetra_Map row_map; + private: /** * Pointer to the user-supplied @@ -1939,31 +2228,39 @@ namespace TrilinosWrappers * assigns parts of the matrix * to the individual processes. */ - Epetra_Map col_map; - - /** - * Trilinos doesn't allow to - * mix additions to matrix - * entries and overwriting them - * (to make synchronisation of - * parallel computations - * simpler). The way we do it - * is to, for each access - * operation, store whether it - * is an insertion or an - * addition. If the previous - * one was of different type, - * then we first have to flush - * the Trilinos buffers; - * otherwise, we can simply go - * on. Luckily, Trilinos has - * an object for this which - * does already all the - * parallel communications in - * such a case, so we simply - * use their model, which - * stores whether the last - * operation was an addition or + std::auto_ptr column_space_map; + + /** + * A sparse matrix object in + * Trilinos to be used for + * finite element based + * problems which allows for + * assembling into non-local + * elements. The actual type, + * a sparse matrix, is set in + * the constructor. + */ + std::auto_ptr matrix; + + /** + * Trilinos doesn't allow to mix + * additions to matrix entries and + * overwriting them (to make + * synchronisation of %parallel + * computations simpler). The way we + * do it is to, for each access + * operation, store whether it is an + * insertion or an addition. If the + * previous one was of different + * type, then we first have to flush + * the Trilinos buffers; otherwise, + * we can simply go on. Luckily, + * Trilinos has an object for this + * which does already all the + * %parallel communications in such a + * case, so we simply use their + * model, which stores whether the + * last operation was an addition or * an insertion. */ Epetra_CombineMode last_action; @@ -1976,12 +2273,11 @@ namespace TrilinosWrappers bool compressed; /** - * An internal Trilinos vector - * that is used for - * accelerating vmult_add - * functions (do not need to - * allocate too many temporary - * vectors). + * An internal Trilinos vector that + * is used for accelerating vmult_add + * functions (in order not to need to + * recreate temporary vectors every + * time that function is called). */ mutable VectorBase temp_vector; @@ -2003,23 +2299,11 @@ namespace TrilinosWrappers */ std::vector column_values; - /** - * A sparse matrix object in - * Trilinos to be used for - * finite element based - * problems which allows for - * assembling into non-local - * elements. The actual type, - * a sparse matrix, is set in - * the constructor. - */ - std::auto_ptr matrix; - /** * To allow calling protected * prepare_add() and * prepare_set(). - */ + */ friend class BlockMatrixBase; }; @@ -2107,14 +2391,14 @@ namespace TrilinosWrappers ++accessor.a_index; // If at end of line: do one - // step, then cycle until we + // step, then cycle until we // find a row with a nonzero // number of entries. 
if (accessor.a_index >= accessor.colnum_cache->size()) { accessor.a_index = 0; ++accessor.a_row; - + while ((accessor.a_row < accessor.matrix->m()) && (accessor.matrix->row_length(accessor.a_row) == 0)) @@ -2186,9 +2470,9 @@ namespace TrilinosWrappers (accessor.row() == other.accessor.row() && accessor.index() < other.accessor.index())); } - + } - + inline @@ -2234,7 +2518,7 @@ namespace TrilinosWrappers for (unsigned int i=r+1; i 0) return const_iterator(this, i, 0); - + // if there is no such line, then take the // end iterator of the matrix return end(); @@ -2249,7 +2533,7 @@ namespace TrilinosWrappers int begin, end; begin = matrix->RowMap().MinMyGID(); end = matrix->RowMap().MaxMyGID()+1; - + return ((index >= static_cast(begin)) && (index < static_cast(end))); } @@ -2271,8 +2555,9 @@ namespace TrilinosWrappers { // flush buffers int ierr; - ierr = matrix->GlobalAssemble (col_map, row_map, true); - + ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(), + true); + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); ierr = matrix->OptimizeStorage (); @@ -2301,11 +2586,11 @@ namespace TrilinosWrappers // Inline the set() and add() - // functions, since they will be - // called frequently, and the - // compiler can optimize away + // functions, since they will be + // called frequently, and the + // compiler can optimize away // some unnecessary loops when - // the sizes are given at + // the sizes are given at // compile time. inline void @@ -2386,13 +2671,13 @@ namespace TrilinosWrappers int ierr; if (last_action == Add) { - ierr = matrix->GlobalAssemble(col_map, row_map, false); + ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(), + true); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + Assert (ierr == 0, ExcTrilinosError(ierr)); } - if (last_action != Insert) - last_action = Insert; + last_action = Insert; int * col_index_ptr; TrilinosScalar const* col_value_ptr; @@ -2444,33 +2729,33 @@ namespace TrilinosWrappers // can directly call the Epetra_CrsMatrix // input function, which is much faster // than the Epetra_FECrsMatrix - // function. We distinguish between two + // function. We distinguish between two // cases: the first one is when the matrix - // is not filled (i.e., it is possible to - // add new elements to the sparsity pattern), + // is not filled (i.e., it is possible to + // add new elements to the sparsity pattern), // and the second one is when the pattern is - // already fixed. In the former case, we + // already fixed. In the former case, we // add the possibility to insert new values, // and in the second we just replace // data. - if (row_map.MyGID(row) == true) + if (row_partitioner().MyGID(row) == true) { if (matrix->Filled() == false) { - ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(row, n_columns, + ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(row, n_columns, const_cast(col_value_ptr), col_index_ptr); // When inserting elements, we do // not want to create exceptions in // the case when inserting non-local - // data (since that's what we want + // data (since that's what we want // to do right now). if (ierr > 0) ierr = 0; } else - ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row, n_columns, + ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row, n_columns, const_cast(col_value_ptr), col_index_ptr); } @@ -2487,20 +2772,20 @@ namespace TrilinosWrappers // which is very unefficient if writing // one element at a time). 
compressed = false; - + if (matrix->Filled() == false) { - ierr = matrix->InsertGlobalValues (1, (int*)&row, + ierr = matrix->InsertGlobalValues (1, (int*)&row, n_columns, col_index_ptr, - &col_value_ptr, + &col_value_ptr, Epetra_FECrsMatrix::ROW_MAJOR); if (ierr > 0) ierr = 0; } else - ierr = matrix->ReplaceGlobalValues (1, (int*)&row, + ierr = matrix->ReplaceGlobalValues (1, (int*)&row, n_columns, col_index_ptr, - &col_value_ptr, + &col_value_ptr, Epetra_FECrsMatrix::ROW_MAJOR); } @@ -2517,7 +2802,7 @@ namespace TrilinosWrappers const TrilinosScalar value) { - Assert (numbers::is_finite(value), + Assert (numbers::is_finite(value), ExcMessage("The given value is not finite but either " "infinite or Not A Number (NaN)")); @@ -2536,13 +2821,13 @@ namespace TrilinosWrappers if (last_action == Insert) { int ierr; - ierr = matrix->GlobalAssemble(col_map, row_map, false); + ierr = matrix->GlobalAssemble(*column_space_map, + row_partitioner(), false); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + Assert (ierr == 0, ExcTrilinosError(ierr)); } - if (last_action != Add) - last_action = Add; + last_action = Add; return; } @@ -2563,7 +2848,7 @@ namespace TrilinosWrappers Assert (values.m() == values.n(), ExcNotQuadratic()); for (unsigned int i=0; iGlobalAssemble(col_map, row_map, false); + ierr = matrix->GlobalAssemble(*column_space_map, + row_partitioner(), false); AssertThrow (ierr == 0, ExcTrilinosError(ierr)); } - if (last_action != Add) - last_action = Add; + last_action = Add; int * col_index_ptr; TrilinosScalar const* col_value_ptr; @@ -2674,7 +2959,7 @@ namespace TrilinosWrappers // can directly call the Epetra_CrsMatrix // input function, which is much faster // than the Epetra_FECrsMatrix function. - if (row_map.MyGID(row) == true) + if (row_partitioner().MyGID(row) == true) { ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row, n_columns, const_cast(col_value_ptr), @@ -2694,9 +2979,9 @@ namespace TrilinosWrappers // one element at a time). 
compressed = false; - ierr = matrix->SumIntoGlobalValues (1, (int*)&row, n_columns, + ierr = matrix->SumIntoGlobalValues (1, (int*)&row, n_columns, col_index_ptr, - &col_value_ptr, + &col_value_ptr, Epetra_FECrsMatrix::ROW_MAJOR); } @@ -2705,25 +2990,25 @@ namespace TrilinosWrappers { std::cout << "------------------------------------------" << std::endl; - std::cout << "Got error " << ierr << " in row " << row - << " of proc " << row_map.Comm().MyPID() + std::cout << "Got error " << ierr << " in row " << row + << " of proc " << row_partitioner().Comm().MyPID() << " when trying to add the columns:" << std::endl; for (int i=0; i= 0, ExcTrilinosError(ierr)); +#endif + Assert (ierr >= 0, ExcTrilinosError(ierr)); } @@ -2765,7 +3050,7 @@ namespace TrilinosWrappers unsigned int begin, end; begin = matrix -> RowMap().MinMyGID(); end = matrix -> RowMap().MaxMyGID()+1; - + return std::make_pair (begin, end); } @@ -2780,25 +3065,83 @@ namespace TrilinosWrappers + template + inline + void SparseMatrix::reinit (const IndexSet ¶llel_partitioning, + const SparsityType &sparsity_pattern, + const MPI_Comm &communicator, + const bool exchange_data) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, false); + reinit (map, map, sparsity_pattern, exchange_data); + } + + + + template + inline + void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityType &sparsity_pattern, + const MPI_Comm &communicator, + const bool exchange_data) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, sparsity_pattern, exchange_data); + } + + + + template + inline + void SparseMatrix::reinit (const IndexSet ¶llel_partitioning, + const ::dealii::SparseMatrix &sparse_matrix, + const MPI_Comm &communicator, + const double drop_tolerance, + const bool copy_values) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, false); + reinit (map, map, sparse_matrix, drop_tolerance, copy_values); + } + + + + template + inline + void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const ::dealii::SparseMatrix &sparse_matrix, + const MPI_Comm &communicator, + const double drop_tolerance, + const bool copy_values) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, sparse_matrix, drop_tolerance, copy_values); + } + + + inline TrilinosScalar SparseMatrix::l1_norm () const { - if (matrix->Filled() == false) - matrix->GlobalAssemble(col_map, row_map, true); - + Assert (matrix->Filled(), ExcMatrixNotCompressed()); return matrix->NormOne(); } - - + + inline TrilinosScalar SparseMatrix::linfty_norm () const { - if (matrix->Filled() == false) - matrix->GlobalAssemble(col_map, row_map, true); - + Assert (matrix->Filled(), ExcMatrixNotCompressed()); return matrix->NormInf(); } @@ -2808,9 +3151,7 @@ namespace TrilinosWrappers TrilinosScalar SparseMatrix::frobenius_norm () const { - if (matrix->Filled() == false) - matrix->GlobalAssemble(col_map, row_map, true); - + Assert (matrix->Filled(), ExcMatrixNotCompressed()); return matrix->NormFrobenius(); } @@ -2850,16 +3191,14 @@ namespace TrilinosWrappers const VectorBase &src) const { Assert (&src 
!= &dst, ExcSourceEqualsDestination()); - - if (matrix->Filled() == false) - matrix->GlobalAssemble(col_map, row_map, true); + Assert (matrix->Filled(), ExcMatrixNotCompressed()); Assert (src.vector_partitioner().SameAs(matrix->DomainMap()) == true, ExcMessage ("Column map of matrix does not fit with vector map!")); Assert (dst.vector_partitioner().SameAs(matrix->RangeMap()) == true, ExcMessage ("Row map of matrix does not fit with vector map!")); - const int ierr = matrix->Multiply (false, src.trilinos_vector(), + const int ierr = matrix->Multiply (false, src.trilinos_vector(), dst.trilinos_vector()); Assert (ierr == 0, ExcTrilinosError(ierr)); } @@ -2872,16 +3211,14 @@ namespace TrilinosWrappers const VectorBase &src) const { Assert (&src != &dst, ExcSourceEqualsDestination()); - - if (matrix->Filled() == false) - matrix->GlobalAssemble(col_map, row_map, true); + Assert (matrix->Filled(), ExcMatrixNotCompressed()); Assert (src.vector_partitioner().SameAs(matrix->RangeMap()) == true, ExcMessage ("Column map of matrix does not fit with vector map!")); Assert (dst.vector_partitioner().SameAs(matrix->DomainMap()) == true, ExcMessage ("Row map of matrix does not fit with vector map!")); - const int ierr = matrix->Multiply (true, src.trilinos_vector(), + const int ierr = matrix->Multiply (true, src.trilinos_vector(), dst.trilinos_vector()); Assert (ierr == 0, ExcTrilinosError(ierr)); } @@ -2927,9 +3264,8 @@ namespace TrilinosWrappers TrilinosScalar SparseMatrix::matrix_norm_square (const VectorBase &v) const { - Assert (row_map.SameAs(col_map), - ExcDimensionMismatch(row_map.NumGlobalElements(), - col_map.NumGlobalElements())); + Assert (row_partitioner().SameAs(domain_partitioner()), + ExcNotQuadratic()); temp_vector.reinit(v); @@ -2944,9 +3280,8 @@ namespace TrilinosWrappers SparseMatrix::matrix_scalar_product (const VectorBase &u, const VectorBase &v) const { - Assert (row_map.SameAs(col_map), - ExcDimensionMismatch(row_map.NumGlobalElements(), - col_map.NumGlobalElements())); + Assert (row_partitioner().SameAs(domain_partitioner()), + ExcNotQuadratic()); temp_vector.reinit(v); @@ -3022,7 +3357,7 @@ namespace TrilinosWrappers return matrix->ColMap(); } - + inline void @@ -3032,7 +3367,7 @@ namespace TrilinosWrappers } - + inline void SparseMatrix::prepare_set() @@ -3040,9 +3375,10 @@ namespace TrilinosWrappers //nothing to do here } - -#endif // DOXYGEN + +#endif // DOXYGEN + } diff --git a/deal.II/lac/include/lac/trilinos_sparsity_pattern.h b/deal.II/lac/include/lac/trilinos_sparsity_pattern.h index 00840e3b2e..dac9686af4 100644 --- a/deal.II/lac/include/lac/trilinos_sparsity_pattern.h +++ b/deal.II/lac/include/lac/trilinos_sparsity_pattern.h @@ -15,17 +15,19 @@ #include -#include -#include -#include -#include -#include +#ifdef DEAL_II_USE_TRILINOS -#include -#include +# include +# include +# include -#ifdef DEAL_II_USE_TRILINOS +# include +# include +# include + +# include +# include # include # include @@ -109,7 +111,7 @@ namespace TrilinosWrappers << " of a distributed sparsity pattern, " << " but only rows " << arg2 << " through " << arg3 << " are stored locally and can be accessed."); - + private: /** * The matrix accessed. @@ -152,7 +154,7 @@ namespace TrilinosWrappers * this data if necessary. */ std_cxx1x::shared_ptr > colnum_cache; - + /** * Discard the old row caches * (they may still be used by @@ -169,19 +171,19 @@ namespace TrilinosWrappers */ friend class const_iterator; }; - + public: - + /** * Constructor. 
Create an * iterator into the matrix @p * matrix for the given row and * the index within it. - */ + */ const_iterator (const SparsityPattern *sparsity_pattern, const unsigned int row, const unsigned int index); - + /** * Prefix increment. */ @@ -232,7 +234,7 @@ namespace TrilinosWrappers << "Attempt to access element " << arg2 << " of row " << arg1 << " which doesn't have that many elements."); - + private: /** * Store an object of the @@ -242,16 +244,16 @@ namespace TrilinosWrappers friend class TrilinosWrappers::SparsityPattern; }; - + } - - + + /** * This class implements a wrapper class to use the Trilinos distributed * sparsity pattern class Epetra_FECrsGraph. This class is designed to be - * used for construction of parallel Trilinos matrices. The functionality of + * used for construction of %parallel Trilinos matrices. The functionality of * this class is modeled after the existing sparsity pattern classes, with - * the difference that this class can work fully in parallel according to a + * the difference that this class can work fully in %parallel according to a * partitioning of the sparsity pattern rows. * * This class has many similarities to the compressed sparsity pattern @@ -261,7 +263,7 @@ namespace TrilinosWrappers * previously reserved for it. However, it also has a method * SparsityPattern::compress(), that finalizes the pattern and enables its * use with Trilinos sparse matrices. - * + * * @ingroup TrilinosWrappers * @ingroup Sparsity * @author Martin Kronbichler, 2008 @@ -275,9 +277,9 @@ namespace TrilinosWrappers * iterator class. */ typedef SparsityPatternIterators::const_iterator const_iterator; - + /** - * @name Constructors and initalization. + * @name Basic constructors and initalization. */ //@{ /** @@ -287,24 +289,201 @@ namespace TrilinosWrappers */ SparsityPattern (); + /** + * Generate a sparsity pattern that is + * completely stored locally, having + * $m$ rows and $n$ columns. The + * resulting matrix will be completely + * stored locally, too. + * + * It is possible to specify the + * number of columns entries per row + * using the optional @p + * n_entries_per_row + * argument. However, this value does + * not need to be accurate or even + * given at all, since one does + * usually not have this kind of + * information before building the + * sparsity pattern (the usual case + * when the function + * DoFTools::make_sparsity_pattern() + * is called). The entries are + * allocated dynamically in a similar + * manner as for the deal.II + * CompressedSparsityPattern + * classes. However, a good estimate + * will reduce the setup time of the + * sparsity pattern. + */ + SparsityPattern (const unsigned int m, + const unsigned int n, + const unsigned int n_entries_per_row = 0); + + /** + * Generate a sparsity pattern that is + * completely stored locally, having + * $m$ rows and $n$ columns. The + * resulting matrix will be completely + * stored locally, too. + * + * The vector + * n_entries_per_row + * specifies the number of entries in + * each row (an information usually + * not available, though). + */ + SparsityPattern (const unsigned int m, + const unsigned int n, + const std::vector &n_entries_per_row); + + /** + * Copy constructor. Sets the calling + * sparsity pattern to be the same as + * the input sparsity pattern. + */ + SparsityPattern (const SparsityPattern &input_sparsity_pattern); + + /** + * Destructor. Made virtual so that + * one can use pointers to this + * class. 
+ */ + virtual ~SparsityPattern (); + + /** + * Initialize a sparsity pattern that + * is completely stored locally, + * having $m$ rows and $n$ + * columns. The resulting matrix will + * be completely stored locally. + * + * The number of columns entries per + * row is specified as the maximum + * number of entries argument. This + * does not need to be an accurate + * number since the entries are + * allocated dynamically in a similar + * manner as for the deal.II + * CompressedSparsityPattern classes, + * but a good estimate will reduce + * the setup time of the sparsity + * pattern. + */ + void + reinit (const unsigned int m, + const unsigned int n, + const unsigned int n_entries_per_row = 0); + + /** + * Initialize a sparsity pattern that + * is completely stored locally, + * having $m$ rows and $n$ columns. The + * resulting matrix will be + * completely stored locally. + * + * The vector + * n_entries_per_row + * specifies the number of entries in + * each row. + */ + void + reinit (const unsigned int m, + const unsigned int n, + const std::vector &n_entries_per_row); + + /** + * Copy function. Sets the calling + * sparsity pattern to be the same as + * the input sparsity pattern. + */ + void + copy_from (const SparsityPattern &input_sparsity_pattern); + + /** + * Copy function from one of the + * deal.II sparsity patterns. If used + * in parallel, this function uses an + * ad-hoc partitioning of the rows + * and columns. + */ + template + void + copy_from (const SparsityType &nontrilinos_sparsity_pattern); + + /** + * Copy operator. This operation is + * only allowed for empty objects, to + * avoid potentially very costly + * operations automatically + * synthesized by the compiler. Use + * copy_from() instead if you know + * that you really want to copy a + * sparsity pattern with non-trivial + * content. + */ + void operator = (const SparsityPattern &input_sparsity_pattern); + + /** + * Release all memory and return to a + * state just like after having + * called the default constructor. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void clear (); + + /** + * In analogy to our own + * SparsityPattern class, this + * function compresses the sparsity + * pattern and allows the resulting + * pattern to be used for actually + * generating a (Trilinos-based) + * matrix. This function also + * exchanges non-local data that + * might have accumulated during the + * addition of new elements. This + * function must therefore be called + * once the structure is fixed. This + * is a collective operation, i.e., + * it needs to be run on all + * processors when used in parallel. + */ + void compress (); +//@} +/** + * @name Constructors and initialization using an Epetra_Map description + */ +//@{ + /** * Constructor for a square sparsity - * pattern using an Epetra_Map and - * the number of nonzero entries in - * the rows of the sparsity - * pattern. Note that this number + * pattern using an Epetra_map for + * the description of the %parallel + * partitioning. Moreover, the number + * of nonzero entries in the rows of + * the sparsity pattern can be + * specified. Note that this number * does not need to be exact, and it - * is even allowed that the actual + * is allowed that the actual * sparsity structure has more * nonzero entries than specified in - * the constructor. 
However it is - * still advantageous to provide good - * estimates here since this will - * considerably increase the - * performance when creating the - * sparsity pattern. + * the constructor (the usual case + * when the function + * DoFTools::make_sparsity_pattern() + * is called). However it is still + * advantageous to provide good + * estimates here since a good value + * will avoid repeated allocation of + * memory, which considerably + * increases the performance when + * creating the sparsity pattern. */ - SparsityPattern (const Epetra_Map &InputMap, + SparsityPattern (const Epetra_Map ¶llel_partitioning, const unsigned int n_entries_per_row = 0); /** @@ -326,135 +505,79 @@ namespace TrilinosWrappers * sparsity pattern is designed to * describe. */ - SparsityPattern (const Epetra_Map &InputMap, + SparsityPattern (const Epetra_Map ¶llel_partitioning, const std::vector &n_entries_per_row); /** * This constructor is similar to the * one above, but it now takes two * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular sparsity pattern, - * where one map describes the - * parallel partitioning of the dofs - * associated with the sparsity - * pattern rows and the other one of - * the sparsity pattern columns. Note - * that there is no real parallelism - * along the columns – the - * processor that owns a certain row - * always owns all the column - * elements, no matter how far they - * might be spread out. The second + * columns. This interface is meant to + * be used for generating rectangular + * sparsity pattern, where one map + * describes the %parallel partitioning + * of the dofs associated with the + * sparsity pattern rows and the other + * one of the sparsity pattern + * columns. Note that there is no real + * parallelism along the columns + * – the processor that owns a + * certain row always owns all the + * column elements, no matter how far + * they might be spread out. The second * Epetra_Map is only used to specify * the number of columns and for - * internal arragements when doing - * matrix-vector products with - * vectors based on that column map. + * specifying the correct domain space + * when performing matrix-vector + * products with vectors based on the + * same column map. * - * The number of columns entries - * per row is specified as the - * maximum number of entries - * argument. + * The number of columns entries per + * row is specified as the maximum + * number of entries argument. */ - SparsityPattern (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + SparsityPattern (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, const unsigned int n_entries_per_row = 0); /** * This constructor is similar to the * one above, but it now takes two * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map specifies the parallel - * distribution of rows and the - * second one specifies the + * columns. This interface is meant to + * be used for generating rectangular + * matrices, where one map specifies + * the %parallel distribution of rows + * and the second one specifies the * distribution of degrees of freedom - * associated with matrix - * columns. This second map is - * however not used for the - * distribution of the columns + * associated with matrix columns. 
This + * second map is however not used for + * the distribution of the columns * themselves – rather, all - * column elements of a row are - * stored on the same processor. The - * vector n_entries_per_row - * specifies the number of entries in - * each row of the newly generated - * matrix. + * column elements of a row are stored + * on the same processor. The vector + * n_entries_per_row specifies + * the number of entries in each row of + * the newly generated matrix. */ - SparsityPattern (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + SparsityPattern (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, const std::vector &n_entries_per_row); - /** - * Generate a sparsity pattern that - * is completely stored locally, - * having $m$ rows and $n$ columns. The - * resulting matrix will be - * completely stored locally. - * - * The number of columns entries per - * row is specified as the maximum - * number of entries argument. As - * above, this does not need to be an - * accurate number since the entries - * are allocated dynamically in a - * similar manner as for the deal.II - * CompressedSparsityPattern classes, - * but a good estimate will reduce - * the setup time of the sparsity - * pattern. - */ - SparsityPattern (const unsigned int m, - const unsigned int n, - const unsigned int n_entries_per_row = 0); - - /** - * Generate a sparsity pattern that - * is completely stored locally, - * having $m$ rows and $n$ columns. The - * resulting matrix will be - * completely stored locally. - * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row. - */ - SparsityPattern (const unsigned int m, - const unsigned int n, - const std::vector &n_entries_per_row); - - /** - * Copy constructor. Sets the calling - * sparsity pattern to be the same as - * the input sparsity pattern. - */ - SparsityPattern (const SparsityPattern &SP); - - /** - * Destructor. Made virtual so that - * one can use pointers to this - * class. - */ - virtual ~SparsityPattern (); - /** * Reinitialization function for - * generating a square sparsity - * pattern using an Epetra_Map and - * the number of nonzero entries in - * the rows of the sparsity - * pattern. Note that this number - * does not need to be exact, and it - * is even allowed that the actual - * sparsity structure has more + * generating a square sparsity pattern + * using an Epetra_Map for the + * description of the %parallel + * partitioning and the number of + * nonzero entries in the rows of the + * sparsity pattern. Note that this + * number does not need to be exact, + * and it is even allowed that the + * actual sparsity structure has more * nonzero entries than specified in - * the constructor. However it is - * still advantageous to provide good + * the constructor. However it is still + * advantageous to provide good * estimates here since this will * considerably increase the * performance when creating the @@ -466,8 +589,8 @@ namespace TrilinosWrappers * can be used by the respective * add() function. */ - void - reinit (const Epetra_Map &InputMap, + void + reinit (const Epetra_Map ¶llel_partitioning, const unsigned int n_entries_per_row = 0); /** @@ -488,8 +611,8 @@ namespace TrilinosWrappers * which the sparsity pattern is * designed to describe. 
*/ - void - reinit (const Epetra_Map &InputMap, + void + reinit (const Epetra_Map ¶llel_partitioning, const std::vector &n_entries_per_row); /** @@ -500,7 +623,7 @@ namespace TrilinosWrappers * meant to be used for generating * rectangular sparsity pattern, * where one map describes the - * parallel partitioning of the dofs + * %parallel partitioning of the dofs * associated with the sparsity * pattern rows and the other one of * the sparsity pattern columns. Note @@ -520,9 +643,9 @@ namespace TrilinosWrappers * row is specified by the argument * n_entries_per_row. */ - void - reinit (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + void + reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, const unsigned int n_entries_per_row = 0); /** @@ -532,7 +655,7 @@ namespace TrilinosWrappers * and columns. This interface is * meant to be used for generating * rectangular matrices, where one - * map specifies the parallel + * map specifies the %parallel * distribution of rows and the * second one specifies the * distribution of degrees of freedom @@ -548,139 +671,307 @@ namespace TrilinosWrappers * each row of the newly generated * matrix. */ - void - reinit (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, - const std::vector &n_entries_per_row); - - /** - * Initialize a sparsity pattern that - * is completely stored locally, - * having $m$ rows and $n$ columns. The - * resulting matrix will be - * completely stored locally. - * - * The number of columns entries per - * row is specified as the maximum - * number of entries argument. As - * above, this does not need to be an - * accurate number since the entries - * are allocated dynamically in a - * similar manner as for the deal.II - * CompressedSparsityPattern classes, - * but a good estimate will reduce - * the setup time of the sparsity - * pattern. - */ - void - reinit (const unsigned int m, - const unsigned int n, - const unsigned int n_entries_per_row = 0); - - /** - * Initialize a sparsity pattern that - * is completely stored locally, - * having $m$ rows and $n$ columns. The - * resulting matrix will be - * completely stored locally. - * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row. - */ - void - reinit (const unsigned int m, - const unsigned int n, + void + reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, const std::vector &n_entries_per_row); /** * Reinit function. Takes one of the * deal.II sparsity patterns and a - * parallel partitioning of the rows + * %parallel partitioning of the rows * and columns for initializing the - * current Trilinos sparsity pattern. + * current Trilinos sparsity + * pattern. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. */ template - void - reinit (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, - const SparsityType &SP); + void + reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const bool exchange_data = false); /** * Reinit function. 
Takes one of the * deal.II sparsity patterns and a - * parallel partitioning of the rows + * %parallel partitioning of the rows * and columns for initializing the - * current Trilinos sparsity pattern. + * current Trilinos sparsity + * pattern. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. */ template - void - reinit (const Epetra_Map &InputMap, - const SparsityType &SP); + void + reinit (const Epetra_Map ¶llel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const bool exchange_data = false); +//@} +/** + * @name Constructors and initialization using an IndexSet description + */ +//@{ /** - * Copy function. Sets the calling - * sparsity pattern to be the same as - * the input sparsity pattern. + * Constructor for a square sparsity + * pattern using an IndexSet and an + * MPI communicator for the + * description of the %parallel + * partitioning. Moreover, the number + * of nonzero entries in the rows of + * the sparsity pattern can be + * specified. Note that this number + * does not need to be exact, and it + * is even allowed that the actual + * sparsity structure has more + * nonzero entries than specified in + * the constructor. However it is + * still advantageous to provide good + * estimates here since a good value + * will avoid repeated allocation of + * memory, which considerably + * increases the performance when + * creating the sparsity pattern. */ - void - copy_from (const SparsityPattern &SP); + SparsityPattern (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_entries_per_row = 0); /** - * Copy function from one of the - * deal.II sparsity patterns. If used - * in parallel, this function uses an - * ad-hoc partitioning of the rows - * and columns. + * Same as before, but now use the + * exact number of nonzeros in each m + * row. Since we know the number of + * elements in the sparsity pattern + * exactly in this case, we can + * already allocate the right amount + * of memory, which makes the + * creation process by the respective + * SparsityPattern::reinit call + * considerably faster. However, this + * is a rather unusual situation, + * since knowing the number of + * entries in each row is usually + * connected to knowing the indices + * of nonzero entries, which the + * sparsity pattern is designed to + * describe. */ - template - void - copy_from (const SparsityType &SP); + SparsityPattern (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); - /** - * Copy operator. This operation is - * only allowed for empty objects, to - * avoid potentially very costly - * operations automatically synthesized - * by the compiler. Use copy_from() - * instead if you know that you really - * want to copy a sparsity pattern with - * non-trivial content. - */ - void operator = (const SparsityPattern &sp); - /** - * Release all memory and - * return to a state just like - * after having called the - * default constructor. + * This constructor is similar to the + * one above, but it now takes two + * different index sets to describe the + * %parallel partitioning of rows and + * columns. This interface is meant to + * be used for generating rectangular + * sparsity pattern. 
Note that there is + * no real parallelism along the + * columns – the processor that + * owns a certain row always owns all + * the column elements, no matter how + * far they might be spread out. The + * second Epetra_Map is only used to + * specify the number of columns and + * for internal arragements when doing + * matrix-vector products with vectors + * based on that column map. * - * This is a - * collective operation that - * needs to be called on all - * processors in order to avoid a - * dead lock. + * The number of columns entries per + * row is specified as the maximum + * number of entries argument. */ - void clear (); + SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_entries_per_row = 0); /** - * In analogy to our own - * SparsityPattern class, this - * function compresses the sparsity - * pattern and allows the resulting - * pattern to be used for actually - * generating a matrix. This function - * also exchanges non-local data that - * might have accumulated during the - * addition of new elements. This - * function must therefore be called - * once the structure is fixed. This - * is a collective operation, i.e., - * it needs to be run on all - * processors when used in parallel. + * This constructor is similar to the + * one above, but it now takes two + * different index sets for rows and + * columns. This interface is meant to + * be used for generating rectangular + * matrices, where one map specifies + * the %parallel distribution of rows + * and the second one specifies the + * distribution of degrees of freedom + * associated with matrix columns. This + * second map is however not used for + * the distribution of the columns + * themselves – rather, all + * column elements of a row are stored + * on the same processor. The vector + * n_entries_per_row specifies + * the number of entries in each row of + * the newly generated matrix. */ - void compress (); + SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); + + /** + * Reinitialization function for + * generating a square sparsity + * pattern using an IndexSet and an + * MPI communicator for the + * description of the %parallel + * partitioning and the number of + * nonzero entries in the rows of the + * sparsity pattern. Note that this + * number does not need to be exact, + * and it is even allowed that the + * actual sparsity structure has more + * nonzero entries than specified in + * the constructor. However it is + * still advantageous to provide good + * estimates here since this will + * considerably increase the + * performance when creating the + * sparsity pattern. + * + * This function does not create any + * entries by itself, but provides + * the correct data structures that + * can be used by the respective + * add() function. + */ + void + reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_entries_per_row = 0); + + /** + * Same as before, but now use the + * exact number of nonzeros in each m + * row. Since we know the number of + * elements in the sparsity pattern + * exactly in this case, we can + * already allocate the right amount + * of memory, which makes process of + * adding entries to the sparsity + * pattern considerably + * faster. 
However, this is a rather + * unusual situation, since knowing + * the number of entries in each row + * is usually connected to knowing + * the indices of nonzero entries, + * which the sparsity pattern is + * designed to describe. + */ + void + reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); + + /** + * This reinit function is similar to + * the one above, but it now takes + * two different index sets for rows + * and columns. This interface is + * meant to be used for generating + * rectangular sparsity pattern, + * where one index set describes the + * %parallel partitioning of the dofs + * associated with the sparsity + * pattern rows and the other one of + * the sparsity pattern columns. Note + * that there is no real parallelism + * along the columns – the + * processor that owns a certain row + * always owns all the column + * elements, no matter how far they + * might be spread out. The second + * IndexSet is only used to specify + * the number of columns and for + * internal arragements when doing + * matrix-vector products with + * vectors based on an EpetraMap + * based on that IndexSet. + * + * The number of columns entries per + * row is specified by the argument + * n_entries_per_row. + */ + void + reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_entries_per_row = 0); + + /** + * Same as before, but now using a + * vector n_entries_per_row + * for specifying the number of + * entries in each row of the + * sparsity pattern. + */ + void + reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); + + /** + * Reinit function. Takes one of the + * deal.II sparsity patterns and the + * %parallel partitioning of the rows + * and columns specified by two index + * sets and a %parallel communicator + * for initializing the current + * Trilinos sparsity pattern. The + * optional argument @p exchange_data + * can be used for reinitialization + * with a sparsity pattern that is + * not fully constructed. This + * feature is only implemented for + * input sparsity patterns of type + * CompressedSimpleSparsityPattern. + */ + template + void + reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + + /** + * Reinit function. Takes one of the + * deal.II sparsity patterns and a + * %parallel partitioning of the rows + * and columns for initializing the + * current Trilinos sparsity + * pattern. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. + */ + template + void + reinit (const IndexSet ¶llel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); //@} /** * @name Information on the sparsity pattern @@ -702,7 +993,7 @@ namespace TrilinosWrappers * processor. */ unsigned int max_entries_per_row () const; - + /** * Return the number of rows in this * sparsity pattern. 
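
To make the IndexSet-based interface documented above more concrete, here is a minimal, hypothetical sketch (not part of the patch) of how a distributed sparsity pattern could be set up from an IndexSet of locally owned rows. The include paths, the function name, the estimate of 5 entries per row, and the element-wise add() call are illustrative assumptions based on the declarations shown above; internally the constructor converts the IndexSet to an Epetra_Map via IndexSet::make_trilinos_map().

#include <base/index_set.h>
#include <lac/trilinos_sparsity_pattern.h>

void example_sparsity_setup (const unsigned int n_global_rows,
                             const unsigned int my_first_row,
                             const unsigned int my_last_row)
{
  // Describe the rows owned by this process (half-open range).
  IndexSet locally_owned_rows (n_global_rows);
  locally_owned_rows.add_range (my_first_row, my_last_row);
  locally_owned_rows.compress ();

  // Square pattern, rows and columns partitioned identically. The last
  // argument is only an estimate of the number of entries per row.
  TrilinosWrappers::SparsityPattern sp (locally_owned_rows,
                                        MPI_COMM_WORLD,
                                        5);

  // Add entries for locally owned rows only, here just the diagonal.
  for (unsigned int row=my_first_row; row<my_last_row; ++row)
    sp.add (row, row);

  // Collective call: exchange non-local data and fix the structure.
  sp.compress ();
}
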
@@ -957,11 +1248,19 @@ namespace TrilinosWrappers void write_ascii (); /** - * Print the sparsity pattern to the - * given stream, using the format - * (line,col). + * Print (the locally owned part of) + * the sparsity pattern to the given + * stream, using the format + * (line,col). The optional + * flag outputs the sparsity pattern + * in Trilinos style, where even the + * according processor number is + * printed to the stream, as well as + * a summary before actually writing + * the entries. */ - void print (std::ostream &out) const; + void print (std::ostream &out, + const bool write_extended_trilinos_info = false) const; /** * Print the sparsity of the matrix @@ -989,12 +1288,6 @@ namespace TrilinosWrappers * plot command. */ void print_gnuplot (std::ostream &out) const; - - // TODO: Write an overloading - // of the operator << for output. - // Since the underlying Trilinos - // object supports it, this should - // be very easy. //@} /** @addtogroup Exceptions @@ -1040,30 +1333,8 @@ namespace TrilinosWrappers << "/" << arg2 << ")" << " of a sparse matrix, but it appears to not" << " exist in the Trilinos sparsity pattern."); - //@} + //@} private: - /** - * A pointer to the communicator used - * for all operations in this object. - * - * Note that we create a new - * communicator (with a unique MPI ID) - * for each object if we are running in - * parallel. - */ - boost::scoped_ptr communicator; - - /** - * Epetra Trilinos - * mapping of the matrix rows - * that assigns parts of the - * matrix to the individual - * processes. This map is - * provided either via the - * constructor or in a reinit - * function. - */ - Epetra_Map row_map; /** * Pointer to the user-supplied @@ -1072,7 +1343,7 @@ namespace TrilinosWrappers * assigns parts of the matrix * to the individual processes. */ - Epetra_Map col_map; + std::auto_ptr column_space_map; /** * A boolean variable to hold @@ -1163,20 +1434,20 @@ namespace TrilinosWrappers const_iterator & const_iterator::operator++ () { - Assert (accessor.a_row < accessor.sparsity_pattern->n_rows(), + Assert (accessor.a_row < accessor.sparsity_pattern->n_rows(), ExcIteratorPastEnd()); ++accessor.a_index; // If at end of line: do one - // step, then cycle until we + // step, then cycle until we // find a row with a nonzero // number of entries. 
if (accessor.a_index >= accessor.colnum_cache->size()) { accessor.a_index = 0; ++accessor.a_row; - + while ((accessor.a_row < accessor.sparsity_pattern->n_rows()) && (accessor.sparsity_pattern->row_length(accessor.a_row) == 0)) @@ -1248,9 +1519,9 @@ namespace TrilinosWrappers (accessor.row() == other.accessor.row() && accessor.index() < other.accessor.index())); } - + } - + inline @@ -1296,7 +1567,7 @@ namespace TrilinosWrappers for (unsigned int i=r+1; i 0) return const_iterator(this, i, 0); - + // if there is no such line, then take the // end iterator of the matrix return end(); @@ -1311,7 +1582,7 @@ namespace TrilinosWrappers int begin, end; begin = graph->RowMap().MinMyGID(); end = graph->RowMap().MaxMyGID()+1; - + return ((index >= static_cast(begin)) && (index < static_cast(end))); } @@ -1361,7 +1632,7 @@ namespace TrilinosWrappers const int n_cols = static_cast(end - begin); compressed = false; - const int ierr = graph->InsertGlobalIndices (1, (int*)&row, + const int ierr = graph->InsertGlobalIndices (1, (int*)&row, n_cols, col_index_ptr); AssertThrow (ierr >= 0, ExcTrilinosError(ierr)); @@ -1413,15 +1684,158 @@ namespace TrilinosWrappers } + inline const Epetra_Comm & SparsityPattern::trilinos_communicator () const { - return *communicator; + return graph->RangeMap().Comm(); } -#endif // DOXYGEN + + inline + SparsityPattern::SparsityPattern (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const unsigned int n_entries_per_row) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + inline + SparsityPattern::SparsityPattern (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + inline + SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const unsigned int n_entries_per_row) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + inline + SparsityPattern:: + SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + inline + void + SparsityPattern::reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const unsigned int n_entries_per_row) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + inline + void SparsityPattern::reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + inline + void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + 
const unsigned int n_entries_per_row) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + inline + void + SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + template + inline + void + SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const MPI_Comm &communicator, + const bool exchange_data) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data); + } + + + + template + inline + void + SparsityPattern::reinit (const IndexSet ¶llel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const MPI_Comm &communicator, + const bool exchange_data) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, nontrilinos_sparsity_pattern, exchange_data); + } + +#endif // DOXYGEN } diff --git a/deal.II/lac/include/lac/trilinos_vector.h b/deal.II/lac/include/lac/trilinos_vector.h index 7838e7691a..968f77ee24 100644 --- a/deal.II/lac/include/lac/trilinos_vector.h +++ b/deal.II/lac/include/lac/trilinos_vector.h @@ -13,17 +13,18 @@ #ifndef __deal2__trilinos_vector_h #define __deal2__trilinos_vector_h -#include -#include -#include -#include -#include -#include -#include +#include #ifdef DEAL_II_USE_TRILINOS +# include +# include +# include +# include +# include +# include + # include "Epetra_Map.h" # include "Epetra_LocalMap.h" @@ -39,6 +40,7 @@ template class Vector; */ namespace TrilinosWrappers { + class SparseMatrix; /** * Namespace for Trilinos vector classes that work in parallel over * MPI. This namespace is restricted to vectors only, whereas matrices @@ -164,6 +166,10 @@ namespace TrilinosWrappers class Vector : public VectorBase { public: +/** + * @name Basic constructors and initalization. + */ +//@{ /** * Default constructor that * generates an empty (zero size) @@ -176,64 +182,17 @@ namespace TrilinosWrappers */ Vector (); - /** - * This constructor takes an - * Epetra_Map that already knows - * how to distribute the - * individual components among - * the MPI processors. Since it - * also includes information - * about the size of the vector, - * this is all we need to - * generate a parallel vector. - */ - Vector (const Epetra_Map &InputMap); - /** * Copy constructor using the * given vector. */ Vector (const Vector &V); - /** - * Copy constructor from the - * TrilinosWrappers vector - * class. Since a vector of this - * class does not necessarily - * need to be distributed among - * processes, the user needs to - * supply us with an Epetra_Map - * that sets the partitioning - * details. - */ - explicit Vector (const Epetra_Map &InputMap, - const VectorBase &v); - - /** - * Copy-constructor from deal.II - * vectors. 
Sets the dimension to that - * of the given vector, and copies all - * elements. - */ - template - explicit Vector (const Epetra_Map &InputMap, - const dealii::Vector &v); - /** * Destructor. */ ~Vector (); - /** - * Reinit functionality. This - * function destroys the old - * vector content and generates a - * new one based on the input - * map. - */ - void reinit (const Epetra_Map &input_map, - const bool fast = false); - /** * Reinit functionality. This * function sets the calling vector @@ -376,30 +335,111 @@ namespace TrilinosWrappers void import_nonlocal_data_for_fe (const dealii::TrilinosWrappers::SparseMatrix &matrix, const Vector &vector); +//@} +/** + * @name Initialization with an Epetra_Map + */ +//@{ + /** + * This constructor takes an + * Epetra_Map that already knows + * how to distribute the + * individual components among + * the MPI processors. Since it + * also includes information + * about the size of the vector, + * this is all we need to + * generate a parallel vector. + */ + Vector (const Epetra_Map ¶llel_partitioning); - private: - /** - * A pointer to the communicator used - * for all operations in this object. - * - * Note that we create a new - * communicator (with a unique MPI ID) - * for each object if we are running in - * parallel. - */ - boost::scoped_ptr communicator; + /** + * Copy constructor from the + * TrilinosWrappers vector + * class. Since a vector of this + * class does not necessarily + * need to be distributed among + * processes, the user needs to + * supply us with an Epetra_Map + * that sets the partitioning + * details. + */ + explicit Vector (const Epetra_Map ¶llel_partitioning, + const VectorBase &v); - /** - * The Epetra map is used to map - * (or rather, partition) vector - * data accross multiple - * processes. This is the - * communicator and data - * distribution object common to - * all Trilinos objects used by - * deal.II. - */ - Epetra_Map map; + /** + * Reinit functionality. This + * function destroys the old + * vector content and generates a + * new one based on the input + * map. + */ + void reinit (const Epetra_Map ¶llel_partitioning, + const bool fast = false); + + /** + * Copy-constructor from deal.II + * vectors. Sets the dimension to that + * of the given vector, and copies all + * elements. + */ + template + explicit Vector (const Epetra_Map ¶llel_partitioning, + const dealii::Vector &v); +//@} +/** + * @name Initialization with an IndexSet + */ +//@{ + /** + * This constructor takes an IndexSet + * that defines how to distribute the + * individual components among the + * MPI processors. Since it also + * includes information about the + * size of the vector, this is all we + * need to generate a %parallel + * vector. + */ + Vector (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Copy constructor from the + * TrilinosWrappers vector + * class. Since a vector of this + * class does not necessarily need to + * be distributed among processes, + * the user needs to supply us with + * an IndexSet and an MPI + * communicator that set the + * partitioning details. + */ + explicit Vector (const IndexSet ¶llel_partitioning, + const VectorBase &v, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Copy-constructor from deal.II + * vectors. Sets the dimension to + * that of the given vector, and + * copies all the elements. 
+ */ + template + explicit Vector (const IndexSet ¶llel_partitioning, + const dealii::Vector &v, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Reinit functionality. This + * function destroys the old vector + * content and generates a new one + * based on the input partitioning. + */ + void reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool fast = false); +//@} }; @@ -426,34 +466,33 @@ namespace TrilinosWrappers #ifndef DOXYGEN template - Vector::Vector (const Epetra_Map &InputMap, + Vector::Vector (const Epetra_Map &input_map, const dealii::Vector &v) - : - map (InputMap) { - vector = std::auto_ptr (new Epetra_FEVector(map)); + vector = std::auto_ptr (new Epetra_FEVector(input_map)); - const int min_my_id = map.MinMyGID(); - const int size = map.NumMyElements(); + const int min_my_id = input_map.MinMyGID(); + const int size = input_map.NumMyElements(); - Assert (map.MaxLID() == size-1, - ExcDimensionMismatch(map.MaxLID(), size-1)); + Assert (input_map.MaxLID() == size-1, + ExcDimensionMismatch(input_map.MaxLID(), size-1)); // Need to copy out values, since the // deal.II might not use doubles, so // that a direct access is not possible. - std::vector indices (size); - std::vector values (size); for (int i=0; iReplaceGlobalValues (size, &indices[0], - &values[0]); - AssertThrow (ierr == 0, VectorBase::ExcTrilinosError(ierr)); + template + Vector::Vector (const IndexSet ¶llel_partitioner, + const dealii::Vector &v, + const MPI_Comm &communicator) + { + *this = Vector(parallel_partitioner.make_trilinos_map (communicator, true), + v); } @@ -474,15 +513,17 @@ namespace TrilinosWrappers { if (size() != v.size()) { + *vector = std::auto_ptr + (new Epetra_FEVector(Epetra_Map (v.size(), 0, #ifdef DEAL_II_COMPILER_SUPPORTS_MPI - map = Epetra_Map (v.size(), 0, Epetra_MpiComm(MPI_COMM_SELF)); + Epetra_MpiComm(MPI_COMM_SELF) #else - map = Epetra_Map (v.size(), 0, Epetra_SerialComm()); + Epetra_SerialComm() #endif + ))); } - *this = Vector(map, v); - + reinit (vector_partitioner(), v); return *this; } @@ -492,6 +533,7 @@ namespace TrilinosWrappers } /* end of namespace MPI */ + /** * This class is a specialization of a Trilinos vector to a localized * version. The purpose of this class is to provide a copy interface @@ -626,13 +668,6 @@ namespace TrilinosWrappers */ Vector & operator = (const Vector &V); - - private: - /** - * A map indicating the size of the - * vector. 
- */ - Epetra_LocalMap map; }; @@ -659,13 +694,8 @@ namespace TrilinosWrappers template Vector::Vector (const dealii::Vector &v) - : -#ifdef DEAL_II_COMPILER_SUPPORTS_MPI - map (v.size(), 0, Epetra_MpiComm(MPI_COMM_SELF)) -#else - map (v.size(), 0, Epetra_SerialComm()) -#endif { + Epetra_LocalMap map ((int)v.size(), 0, Utilities::Trilinos::comm_self()); vector = std::auto_ptr (new Epetra_FEVector(map)); *this = v; } @@ -681,6 +711,8 @@ namespace TrilinosWrappers return *this; } + + template Vector & Vector::operator = (const ::dealii::Vector &v) @@ -688,15 +720,13 @@ namespace TrilinosWrappers if (size() != v.size()) { vector.release(); -#ifdef DEAL_II_COMPILER_SUPPORTS_MPI - map = Epetra_LocalMap (v.size(), 0, Epetra_MpiComm(MPI_COMM_SELF)); -#else - map = Epetra_LocalMap (v.size(), 0, Epetra_SerialComm()); -#endif + Epetra_LocalMap map ((int)v.size(), 0, + Utilities::Trilinos::comm_self()); vector = std::auto_ptr (new Epetra_FEVector(map)); } + Epetra_Map & map = vector_partitioner(); const int min_my_id = map.MinMyGID(); const int size = map.NumMyElements(); @@ -706,17 +736,8 @@ namespace TrilinosWrappers // Need to copy out values, since the // deal.II might not use doubles, so // that a direct access is not possible. - std::vector indices (size); - std::vector values (size); for (int i=0; iReplaceGlobalValues (size, &indices[0], - &values[0]); - AssertThrow (ierr == 0, VectorBase::ExcTrilinosError(ierr)); + (*vector)[0][i] = v(i); return *this; } diff --git a/deal.II/lac/include/lac/trilinos_vector_base.h b/deal.II/lac/include/lac/trilinos_vector_base.h index 85526044cc..938cf36507 100644 --- a/deal.II/lac/include/lac/trilinos_vector_base.h +++ b/deal.II/lac/include/lac/trilinos_vector_base.h @@ -15,16 +15,17 @@ #include -#include -#include -#include - -#include -#include -#include #ifdef DEAL_II_USE_TRILINOS +# include +# include +# include + +# include +# include +# include + # define TrilinosScalar double # include "Epetra_ConfigDefs.h" # ifdef DEAL_II_COMPILER_SUPPORTS_MPI // only if MPI is installed @@ -319,8 +320,26 @@ namespace TrilinosWrappers * vector element-by-element and * before anything else can be * done on it. + * + * The (defaulted) argument can + * be used to specify the + * compress mode + * (Add or + * Insert) in case + * the vector has not been + * written to since the last + * time this function was + * called. The argument is + * ignored if the vector has + * been added or written to + * since the last time + * compress() was called. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + * more information. */ - void compress (); + void compress (const Epetra_CombineMode last_action = Zero); /** * Returns the state of the @@ -827,7 +846,7 @@ namespace TrilinosWrappers * class. */ void print (const char* format = 0) const; - + /** * Print to a stream. @p * precision denotes the desired @@ -1015,27 +1034,7 @@ namespace TrilinosWrappers const VectorReference & VectorReference::operator += (const TrilinosScalar &value) const { - const int local_row = vector.vector->Map().LID(index); - if (local_row == -1) - { - // write the code explicitly here to make - // it faster. 
- if (vector.last_action != Add) - { - if (vector.last_action == Insert) - vector.vector->GlobalAssemble(Insert); - vector.last_action = Add; - } - - const int ierr = vector.vector->SumIntoGlobalValues (1, - (const int*)(&index), - &value); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - vector.compressed = false; - } - else - (*vector.vector)[0][local_row] += value; - + vector.add (1, &index, &value); return *this; } @@ -1120,11 +1119,13 @@ namespace TrilinosWrappers inline void - VectorBase::compress () + VectorBase::compress (const Epetra_CombineMode given_last_action) { // Now pass over the information about // what we did last to the vector. - const int ierr = vector->GlobalAssemble(last_action); + const int ierr = vector->GlobalAssemble(last_action != Zero ? + last_action : + given_last_action); AssertThrow (ierr == 0, ExcTrilinosError(ierr)); last_action = Zero; @@ -1802,7 +1803,8 @@ namespace TrilinosWrappers { return static_cast(vector->Map()); } - + + #endif // DOXYGEN } diff --git a/deal.II/lac/source/compressed_simple_sparsity_pattern.cc b/deal.II/lac/source/compressed_simple_sparsity_pattern.cc index f5b1d9f0cb..cb8b932238 100644 --- a/deal.II/lac/source/compressed_simple_sparsity_pattern.cc +++ b/deal.II/lac/source/compressed_simple_sparsity_pattern.cc @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006 by the deal.II authors +// Copyright (C) 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -13,9 +13,8 @@ #include +#include -#include -#include #include #include #include @@ -202,11 +201,18 @@ CompressedSimpleSparsityPattern::Line::add_entries (ForwardIterator begin, } +unsigned int +CompressedSimpleSparsityPattern::Line::memory_consumption () const +{ + return entries.capacity()*sizeof(unsigned int)+sizeof(Line); +} + CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern () : rows(0), - cols(0) + cols(0), + rowset(0) {} @@ -216,7 +222,8 @@ CompressedSimpleSparsityPattern (const CompressedSimpleSparsityPattern &s) : Subscriptor(), rows(0), - cols(0) + cols(0), + rowset(0) { Assert (s.rows == 0, ExcInvalidConstructorCall()); Assert (s.cols == 0, ExcInvalidConstructorCall()); @@ -225,12 +232,15 @@ CompressedSimpleSparsityPattern (const CompressedSimpleSparsityPattern &s) CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const unsigned int m, - const unsigned int n) + const unsigned int n, + const IndexSet & rowset_ +) : rows(0), - cols(0) + cols(0), + rowset(0) { - reinit (m,n); + reinit (m,n, rowset_); } @@ -238,7 +248,8 @@ CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const unsigned CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const unsigned int n) : rows(0), - cols(0) + cols(0), + rowset(0) { reinit (n,n); } @@ -261,12 +272,21 @@ CompressedSimpleSparsityPattern::operator = (const CompressedSimpleSparsityPatte void CompressedSimpleSparsityPattern::reinit (const unsigned int m, - const unsigned int n) + const unsigned int n, + const IndexSet & rowset_) { rows = m; cols = n; + rowset=rowset_; + if (rowset.size()==0) + { + rowset.set_size(m); + rowset.add_range(0,m); + } + + Assert( rowset.size()==m, ExcInternalError()); - std::vector new_lines (rows); + std::vector new_lines (rowset.n_elements()); lines.swap (new_lines); } @@ -290,7 +310,7 @@ unsigned int CompressedSimpleSparsityPattern::max_entries_per_row () const { unsigned int m = 0; - for (unsigned 
int i=0; i(lines[i].entries.size())); } @@ -302,13 +322,16 @@ CompressedSimpleSparsityPattern::max_entries_per_row () const bool CompressedSimpleSparsityPattern::exists (const unsigned int i, - const unsigned int j) const + const unsigned int j) const { Assert (i::const_iterator j=lines[row].entries.begin(); j != lines[row].entries.end(); ++j) // add the transpose entry if // this is not the diagonal - if (row != *j) - add (*j, row); + if (rowindex != *j) + add (*j, rowindex); } } @@ -350,7 +375,7 @@ CompressedSimpleSparsityPattern::print (std::ostream &out) const { for (unsigned int row=0; row::const_iterator j=lines[row].entries.begin(); @@ -370,6 +395,8 @@ CompressedSimpleSparsityPattern::print_gnuplot (std::ostream &out) const { for (unsigned int row=0; row::const_iterator j=lines[row].entries.begin(); j != lines[row].entries.end(); ++j) @@ -378,7 +405,9 @@ CompressedSimpleSparsityPattern::print_gnuplot (std::ostream &out) const // j horizontal, gnuplot output is // x-y, that is we have to exchange // the order of output - out << *j << " " << -static_cast(row) << std::endl; + out << *j << " " + << -static_cast(rowindex) + << std::endl; } @@ -393,11 +422,13 @@ CompressedSimpleSparsityPattern::bandwidth () const unsigned int b=0; for (unsigned int row=0; row::const_iterator j=lines[row].entries.begin(); j != lines[row].entries.end(); ++j) - if (static_cast(std::abs(static_cast(row-*j))) > b) - b = std::abs(static_cast(row-*j)); + if (static_cast(std::abs(static_cast(rowindex-*j))) > b) + b = std::abs(static_cast(rowindex-*j)); } return b; @@ -418,6 +449,18 @@ CompressedSimpleSparsityPattern::n_nonzero_elements () const } +unsigned int +CompressedSimpleSparsityPattern::memory_consumption () const +{ + //TODO: IndexSet... + unsigned int mem = sizeof(CompressedSimpleSparsityPattern); + for (unsigned int i=0; i +#include #ifdef DEAL_II_USE_PETSC diff --git a/deal.II/lac/source/petsc_full_matrix.cc b/deal.II/lac/source/petsc_full_matrix.cc index 38645f961b..1525114c18 100644 --- a/deal.II/lac/source/petsc_full_matrix.cc +++ b/deal.II/lac/source/petsc_full_matrix.cc @@ -13,10 +13,11 @@ #include -#include #ifdef DEAL_II_USE_PETSC +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_matrix_base.cc b/deal.II/lac/source/petsc_matrix_base.cc index 1589683cd9..e40a96fc2c 100644 --- a/deal.II/lac/source/petsc_matrix_base.cc +++ b/deal.II/lac/source/petsc_matrix_base.cc @@ -13,13 +13,14 @@ #include -#include -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers @@ -609,6 +610,23 @@ namespace PETScWrappers MatView (matrix,PETSC_VIEWER_STDOUT_WORLD); } + + + unsigned int + MatrixBase::memory_consumption() const + { + MatInfo info; + MatGetInfo(matrix, MAT_LOCAL, &info); + + // report if sparsity pattern was not + // sufficient + if (info.mallocs) + std::cout << "*** PETSC-Matrix: num-allocs = " + << info.mallocs << " ***" << std::endl; + + return sizeof(*this) + info.memory; + } + } DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/lac/source/petsc_parallel_block_sparse_matrix.cc b/deal.II/lac/source/petsc_parallel_block_sparse_matrix.cc index ca98f33066..677f0eaeae 100644 --- a/deal.II/lac/source/petsc_parallel_block_sparse_matrix.cc +++ b/deal.II/lac/source/petsc_parallel_block_sparse_matrix.cc @@ -11,8 +11,8 @@ // //--------------------------------------------------------------------------- -#include +#include #ifdef DEAL_II_USE_PETSC diff --git 
a/deal.II/lac/source/petsc_parallel_block_vector.cc b/deal.II/lac/source/petsc_parallel_block_vector.cc index 9c074a14e8..fdd4fb1a58 100644 --- a/deal.II/lac/source/petsc_parallel_block_vector.cc +++ b/deal.II/lac/source/petsc_parallel_block_vector.cc @@ -13,11 +13,11 @@ #include -#include - #ifdef DEAL_II_USE_PETSC +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_parallel_sparse_matrix.cc b/deal.II/lac/source/petsc_parallel_sparse_matrix.cc index 02e166b1e4..1d998b3e99 100644 --- a/deal.II/lac/source/petsc_parallel_sparse_matrix.cc +++ b/deal.II/lac/source/petsc_parallel_sparse_matrix.cc @@ -13,14 +13,14 @@ #include -#include - -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_parallel_vector.cc b/deal.II/lac/source/petsc_parallel_vector.cc index 31e951f63d..53506ad031 100644 --- a/deal.II/lac/source/petsc_parallel_vector.cc +++ b/deal.II/lac/source/petsc_parallel_vector.cc @@ -13,13 +13,13 @@ #include -#include - -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_precondition.cc b/deal.II/lac/source/petsc_precondition.cc index d8ea360c72..b2bc95b8f2 100644 --- a/deal.II/lac/source/petsc_precondition.cc +++ b/deal.II/lac/source/petsc_precondition.cc @@ -12,14 +12,14 @@ //--------------------------------------------------------------------------- -#include -#include #include -#include - #ifdef DEAL_II_USE_PETSC +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_solver.cc b/deal.II/lac/source/petsc_solver.cc index 2eae9aecfa..cc9bf46911 100644 --- a/deal.II/lac/source/petsc_solver.cc +++ b/deal.II/lac/source/petsc_solver.cc @@ -13,14 +13,14 @@ #include -#include -#include -#include - -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include + #if (PETSC_VERSION_MAJOR == 2) && (PETSC_VERSION_MINOR < 2) # include #endif diff --git a/deal.II/lac/source/petsc_sparse_matrix.cc b/deal.II/lac/source/petsc_sparse_matrix.cc index cf3535145c..cd6c2e3796 100644 --- a/deal.II/lac/source/petsc_sparse_matrix.cc +++ b/deal.II/lac/source/petsc_sparse_matrix.cc @@ -13,14 +13,14 @@ #include -#include - -#include -#include -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_vector.cc b/deal.II/lac/source/petsc_vector.cc index e7773935a4..10fa556a39 100644 --- a/deal.II/lac/source/petsc_vector.cc +++ b/deal.II/lac/source/petsc_vector.cc @@ -14,10 +14,10 @@ #include -#include - #ifdef DEAL_II_USE_PETSC +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/petsc_vector_base.cc b/deal.II/lac/source/petsc_vector_base.cc index a339a2ab3b..259e8a1566 100644 --- a/deal.II/lac/source/petsc_vector_base.cc +++ b/deal.II/lac/source/petsc_vector_base.cc @@ -13,13 +13,13 @@ #include -#include -#include - -#include #ifdef DEAL_II_USE_PETSC +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace PETScWrappers diff --git a/deal.II/lac/source/slepc_solver.cc b/deal.II/lac/source/slepc_solver.cc index ff59f948ae..58142a7c87 100644 --- a/deal.II/lac/source/slepc_solver.cc +++ b/deal.II/lac/source/slepc_solver.cc @@ -12,18 +12,20 @@ // 
//--------------------------------------------------------------------------- -#include -#include -#include -#include -#include -#include -#include +#include #ifdef DEAL_II_USE_SLEPC -#include +# include +# include +# include +# include + +# include +# include + +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/source/slepc_spectral_transformation.cc b/deal.II/lac/source/slepc_spectral_transformation.cc index 33a29b0474..a406a2bded 100644 --- a/deal.II/lac/source/slepc_spectral_transformation.cc +++ b/deal.II/lac/source/slepc_spectral_transformation.cc @@ -12,18 +12,20 @@ // //--------------------------------------------------------------------------- -#include -#include -#include -#include -#include -#include -#include +#include #ifdef DEAL_II_USE_SLEPC -#include +# include +# include +# include +# include + +# include +# include + +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/source/trilinos_block_sparse_matrix.cc b/deal.II/lac/source/trilinos_block_sparse_matrix.cc index 46469bd6fe..890816ed48 100644 --- a/deal.II/lac/source/trilinos_block_sparse_matrix.cc +++ b/deal.II/lac/source/trilinos_block_sparse_matrix.cc @@ -11,13 +11,14 @@ // //--------------------------------------------------------------------------- -#include -#include -#include +#include #ifdef DEAL_II_USE_TRILINOS +# include +# include + DEAL_II_NAMESPACE_OPEN namespace TrilinosWrappers diff --git a/deal.II/lac/source/trilinos_block_vector.cc b/deal.II/lac/source/trilinos_block_vector.cc index 2bcbeff550..b25e713ad8 100644 --- a/deal.II/lac/source/trilinos_block_vector.cc +++ b/deal.II/lac/source/trilinos_block_vector.cc @@ -11,12 +11,14 @@ // //--------------------------------------------------------------------------- -#include -#include +#include #ifdef DEAL_II_USE_TRILINOS +# include + + DEAL_II_NAMESPACE_OPEN namespace TrilinosWrappers diff --git a/deal.II/lac/source/trilinos_precondition.cc b/deal.II/lac/source/trilinos_precondition.cc index 1ecdbead3e..15b0885cbe 100644 --- a/deal.II/lac/source/trilinos_precondition.cc +++ b/deal.II/lac/source/trilinos_precondition.cc @@ -12,19 +12,20 @@ //--------------------------------------------------------------------------- -#include -#include #include -#include #ifdef DEAL_II_USE_TRILINOS -#include -#include -#include -#include -#include -#include +# include +# include +# include + +# include +# include +# include +# include +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/source/trilinos_precondition_block.cc b/deal.II/lac/source/trilinos_precondition_block.cc index e7777f7bf7..61fab8a232 100644 --- a/deal.II/lac/source/trilinos_precondition_block.cc +++ b/deal.II/lac/source/trilinos_precondition_block.cc @@ -11,23 +11,24 @@ // //--------------------------------------------------------------------------- -#include -#include -#include +#include #ifdef DEAL_II_USE_TRILINOS -#include -#include +# include +# include + +# include +# include -#include -#include -#include -#include -#include -#include -#include +# include +# include +# include +# include +# include +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/source/trilinos_solver.cc b/deal.II/lac/source/trilinos_solver.cc index 81097f8f10..8d0f4b4aba 100644 --- a/deal.II/lac/source/trilinos_solver.cc +++ b/deal.II/lac/source/trilinos_solver.cc @@ -12,16 +12,17 @@ //--------------------------------------------------------------------------- -#include #include -#include -#include -#include - -#include #ifdef DEAL_II_USE_TRILINOS +# include +# include +# include +# 
include + +# include + DEAL_II_NAMESPACE_OPEN namespace TrilinosWrappers diff --git a/deal.II/lac/source/trilinos_solver_block.cc b/deal.II/lac/source/trilinos_solver_block.cc index a9cd01815b..cbae827f56 100644 --- a/deal.II/lac/source/trilinos_solver_block.cc +++ b/deal.II/lac/source/trilinos_solver_block.cc @@ -11,34 +11,34 @@ // //--------------------------------------------------------------------------- -#include -#include -#include -#include - -#include -#include +#include #ifdef DEAL_II_USE_TRILINOS -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +# include +# include +# include + +# include + +# include +# include + +# include + +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/lac/source/trilinos_sparse_matrix.cc b/deal.II/lac/source/trilinos_sparse_matrix.cc index 01ab086ff8..f38d807112 100644 --- a/deal.II/lac/source/trilinos_sparse_matrix.cc +++ b/deal.II/lac/source/trilinos_sparse_matrix.cc @@ -11,21 +11,22 @@ // //--------------------------------------------------------------------------- -#include -#include -#include -#include -#include -#include -#include -#include +#include #ifdef DEAL_II_USE_TRILINOS -#include -#include -#include +# include +# include +# include +# include +# include +# include +# include + +# include +# include +# include DEAL_II_NAMESPACE_OPEN @@ -48,7 +49,7 @@ namespace TrilinosWrappers return; } - + // otherwise first flush Trilinos caches matrix->compress (); @@ -57,10 +58,10 @@ namespace TrilinosWrappers int ncols; int colnums = matrix->n(); TrilinosScalar *values = new TrilinosScalar(colnums); - + int ierr; - ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy((int)this->a_row, - colnums, + ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy((int)this->a_row, + colnums, ncols, &(values[0])); AssertThrow (ierr == 0, ExcTrilinosError(ierr)); @@ -91,80 +92,76 @@ namespace TrilinosWrappers // interface. 
SparseMatrix::SparseMatrix () : - communicator (Utilities::Trilinos::duplicate_communicator - (Utilities::Trilinos::comm_self())), - row_map (0, 0, *communicator), - col_map (0, 0, *communicator), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (0, 0, + Utilities::Trilinos::comm_self()))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(View, row_map, col_map, 0))) + (new Epetra_FECrsMatrix(View, *column_space_map, + *column_space_map, 0))), + last_action (Zero), + compressed (true) { matrix->FillComplete(); } - SparseMatrix::SparseMatrix (const Epetra_Map &InputMap, + SparseMatrix::SparseMatrix (const Epetra_Map &input_map, const unsigned int n_max_entries_per_row) : - communicator (Utilities::Trilinos::duplicate_communicator (InputMap.Comm())), - row_map (Utilities::Trilinos::duplicate_map (InputMap, *communicator)), - col_map (row_map), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (input_map))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, row_map, - int(n_max_entries_per_row), false))) + (new Epetra_FECrsMatrix(Copy, *column_space_map, + int(n_max_entries_per_row), false))), + last_action (Zero), + compressed (false) {} - SparseMatrix::SparseMatrix (const Epetra_Map &InputMap, + SparseMatrix::SparseMatrix (const Epetra_Map &input_map, const std::vector &n_entries_per_row) : - communicator (Utilities::Trilinos::duplicate_communicator (InputMap.Comm())), - row_map (Utilities::Trilinos::duplicate_map (InputMap, *communicator)), - col_map (row_map), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (input_map))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, row_map, + (new Epetra_FECrsMatrix(Copy, *column_space_map, (int*)const_cast(&(n_entries_per_row[0])), - false))) + false))), + last_action (Zero), + compressed (false) {} - SparseMatrix::SparseMatrix (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, const unsigned int n_max_entries_per_row) : - communicator (Utilities::Trilinos::duplicate_communicator (InputRowMap.Comm())), - row_map (Utilities::Trilinos::duplicate_map (InputRowMap, *communicator)), - col_map (Utilities::Trilinos::duplicate_map (InputColMap, *communicator)), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (input_col_map))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, row_map, - int(n_max_entries_per_row), false))) + (new Epetra_FECrsMatrix(Copy, input_row_map, + int(n_max_entries_per_row), false))), + last_action (Zero), + compressed (false) {} - SparseMatrix::SparseMatrix (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, const std::vector &n_entries_per_row) : - communicator (Utilities::Trilinos::duplicate_communicator (InputRowMap.Comm())), - row_map (Utilities::Trilinos::duplicate_map (InputRowMap, *communicator)), - col_map (Utilities::Trilinos::duplicate_map (InputColMap, *communicator)), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (input_col_map))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, row_map, + (new Epetra_FECrsMatrix(Copy, input_row_map, (int*)const_cast(&(n_entries_per_row[0])), - false))) + false))), + last_action (Zero), + compressed (false) {} @@ -173,12 
+170,10 @@ namespace TrilinosWrappers const unsigned int n, const unsigned int n_max_entries_per_row) : - communicator (Utilities::Trilinos::duplicate_communicator - (Utilities::Trilinos::comm_self())), - row_map (m, 0, *communicator), - col_map (n, 0, *communicator), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (n, 0, + Utilities::Trilinos::comm_self()))), + // on one processor only, we know how the // columns of the matrix will be // distributed (everything on one @@ -188,9 +183,14 @@ namespace TrilinosWrappers // information from columns is only // available when entries have been added matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, row_map, col_map, - int(n_max_entries_per_row), - false))) + (new Epetra_FECrsMatrix(Copy, + Epetra_Map (n, 0, + Utilities::Trilinos::comm_self()), + *column_space_map, + n_max_entries_per_row, + false))), + last_action (Zero), + compressed (false) {} @@ -199,76 +199,140 @@ namespace TrilinosWrappers const unsigned int n, const std::vector &n_entries_per_row) : - communicator (Utilities::Trilinos::duplicate_communicator - (Utilities::Trilinos::comm_self())), - row_map (m, 0, *communicator), - col_map (n, 0, *communicator), + column_space_map (std::auto_ptr + (new Epetra_Map (n, 0, + Utilities::Trilinos::comm_self()))), + matrix (std::auto_ptr + (new Epetra_FECrsMatrix(Copy, + Epetra_Map (n, 0, + Utilities::Trilinos::comm_self()), + *column_space_map, + (int*)const_cast(&(n_entries_per_row[0])), + false))), + last_action (Zero), + compressed (false) + {} + + + + SparseMatrix::SparseMatrix (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const unsigned int n_max_entries_per_row) + : + column_space_map (std::auto_ptr + (new Epetra_Map(parallel_partitioning. + make_trilinos_map(communicator, false)))), + matrix (std::auto_ptr + (new Epetra_FECrsMatrix(Copy, + *column_space_map, + n_max_entries_per_row, + false))), + last_action (Zero), + compressed (false) + {} + + + + SparseMatrix::SparseMatrix (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + : + column_space_map (std::auto_ptr + (new Epetra_Map(parallel_partitioning. + make_trilinos_map(communicator, false)))), + matrix (std::auto_ptr + (new Epetra_FECrsMatrix(Copy, + *column_space_map, + (int*)const_cast(&(n_entries_per_row[0])), + false))), last_action (Zero), - compressed (true), + compressed (false) + {} + + + + SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const unsigned int n_max_entries_per_row) + : + column_space_map (std::auto_ptr + (new Epetra_Map(col_parallel_partitioning. + make_trilinos_map(communicator, false)))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, row_map, col_map, - (int*)const_cast(&(n_entries_per_row[0])), - false))) + (new Epetra_FECrsMatrix(Copy, + row_parallel_partitioning. 
+ make_trilinos_map(communicator, false), + n_max_entries_per_row, + false))), + last_action (Zero), + compressed (false) {} - SparseMatrix::SparseMatrix (const SparsityPattern &InputSP) + SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) : - Subscriptor(), - communicator (Utilities::Trilinos::duplicate_communicator - (InputSP.range_partitioner().Comm())), - row_map (Utilities::Trilinos:: - duplicate_map (InputSP.range_partitioner(), - *communicator)), - col_map (Utilities::Trilinos:: - duplicate_map (InputSP.domain_partitioner(), - *communicator)), + column_space_map (std::auto_ptr + (new Epetra_Map(col_parallel_partitioning. + make_trilinos_map(communicator, false)))), + matrix (std::auto_ptr + (new Epetra_FECrsMatrix(Copy, + row_parallel_partitioning. + make_trilinos_map(communicator, false), + (int*)const_cast(&(n_entries_per_row[0])), + false))), last_action (Zero), - compressed (true), + compressed (false) + {} + + + + SparseMatrix::SparseMatrix (const SparsityPattern &sparsity_pattern) + : + column_space_map (std::auto_ptr + (new Epetra_Map (sparsity_pattern.domain_partitioner()))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(Copy, - InputSP.trilinos_sparsity_pattern(), - false))) + (new Epetra_FECrsMatrix(Copy, + sparsity_pattern.trilinos_sparsity_pattern(), + false))), + last_action (Zero), + compressed (true) { - Assert(InputSP.trilinos_sparsity_pattern().Filled() == true, + Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true, ExcMessage("The Trilinos sparsity pattern has not been compressed.")); compress(); } - SparseMatrix::SparseMatrix (const SparseMatrix &InputMatrix) + SparseMatrix::SparseMatrix (const SparseMatrix &input_matrix) : Subscriptor(), - communicator (Utilities::Trilinos::duplicate_communicator - (InputMatrix.row_map.Comm())), - row_map (Utilities::Trilinos:: - duplicate_map (InputMatrix.row_map, - *communicator)), - col_map (Utilities::Trilinos:: - duplicate_map (InputMatrix.col_map, - *communicator)), - last_action (Zero), - compressed (true), + column_space_map (std::auto_ptr + (new Epetra_Map (input_matrix.domain_partitioner()))), matrix (std::auto_ptr - (new Epetra_FECrsMatrix(*InputMatrix.matrix))) + (new Epetra_FECrsMatrix(*input_matrix.matrix))), + last_action (Zero), + compressed (true) {} SparseMatrix::~SparseMatrix () - { - Utilities::Trilinos::destroy_communicator (*communicator); - } + {} SparseMatrix & SparseMatrix::copy_from (const SparseMatrix &m) { - row_map = Utilities::Trilinos::duplicate_map (m.row_map, *communicator); - col_map = Utilities::Trilinos::duplicate_map (m.col_map, *communicator); + column_space_map = std::auto_ptr + (new Epetra_Map (m.domain_partitioner())); + // check whether we need to update the // partitioner or can just copy the data: // in case we have the same distribution, @@ -281,7 +345,7 @@ namespace TrilinosWrappers compress(); return *this; } - + template @@ -303,9 +367,10 @@ namespace TrilinosWrappers template void SparseMatrix::reinit (const Epetra_Map &input_map, - const SparsityType &sparsity_pattern) + const SparsityType &sparsity_pattern, + const bool exchange_data) { - reinit (input_map, input_map, sparsity_pattern); + reinit (input_map, input_map, sparsity_pattern, exchange_data); } @@ -314,12 +379,14 @@ namespace TrilinosWrappers void SparseMatrix::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, - const SparsityType 
&sparsity_pattern) + const SparsityType &sparsity_pattern, + const bool exchange_data) { matrix.reset(); const unsigned int n_rows = sparsity_pattern.n_rows(); + Assert (exchange_data == false, ExcNotImplemented()); if (input_row_map.Comm().MyPID() == 0) { Assert (input_row_map.NumGlobalElements() == (int)sparsity_pattern.n_rows(), @@ -330,8 +397,7 @@ namespace TrilinosWrappers sparsity_pattern.n_cols())); } - row_map = Utilities::Trilinos::duplicate_map (input_row_map, *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, *communicator); + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); std::vector n_entries_per_row(n_rows); @@ -362,13 +428,13 @@ namespace TrilinosWrappers // columns as well. Compare this with bug // # 4123 in the Sandia Bugzilla. std::auto_ptr graph; - if (row_map.Comm().NumProc() > 1) - graph = std::auto_ptr - (new Epetra_CrsGraph (Copy, row_map, + if (input_row_map.Comm().NumProc() > 1) + graph = std::auto_ptr + (new Epetra_CrsGraph (Copy, input_row_map, &n_entries_per_row[input_row_map.MinMyGID()], true)); else - graph = std::auto_ptr - (new Epetra_CrsGraph (Copy, row_map, col_map, + graph = std::auto_ptr + (new Epetra_CrsGraph (Copy, input_row_map, input_col_map, &n_entries_per_row[input_row_map.MinMyGID()], true)); // This functions assumes that the @@ -382,9 +448,9 @@ namespace TrilinosWrappers // now insert the indices std::vector row_indices; - + for (unsigned int row=0; rowInsertGlobalIndices (static_cast(row), + graph->InsertGlobalIndices (static_cast(row), row_length, &row_indices[0]); } @@ -400,7 +466,7 @@ namespace TrilinosWrappers // Eventually, optimize the graph // structure (sort indices, make memory // contiguous, etc). - graph->FillComplete(col_map, row_map); + graph->FillComplete(input_col_map, input_row_map); graph->OptimizeStorage(); // check whether we got the number of @@ -423,6 +489,21 @@ namespace TrilinosWrappers + template <> + void + SparseMatrix::reinit (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, + const CompressedSimpleSparsityPattern &sparsity_pattern, + const bool exchange_data) + { + SparsityPattern trilinos_sparsity; + trilinos_sparsity.reinit (input_row_map, input_col_map, sparsity_pattern, + exchange_data); + reinit (trilinos_sparsity); + } + + + // The CompressedSetSparsityPattern // class stores the columns // differently, so we need to @@ -432,7 +513,8 @@ namespace TrilinosWrappers void SparseMatrix::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, - const CompressedSetSparsityPattern &sparsity_pattern) + const CompressedSetSparsityPattern &sparsity_pattern, + const bool exchange_data) { // this function is similar to the other // reinit function with sparsity pattern @@ -442,6 +524,7 @@ namespace TrilinosWrappers const unsigned int n_rows = sparsity_pattern.n_rows(); + Assert (exchange_data == false, ExcInternalError()); if (input_row_map.Comm().MyPID() == 0) { Assert (input_row_map.NumGlobalElements() == (int)sparsity_pattern.n_rows(), @@ -452,8 +535,7 @@ namespace TrilinosWrappers sparsity_pattern.n_cols())); } - row_map = Utilities::Trilinos::duplicate_map (input_row_map, *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, *communicator); + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); std::vector n_entries_per_row(n_rows); @@ -461,13 +543,13 @@ namespace TrilinosWrappers n_entries_per_row[row] = sparsity_pattern.row_length(row); std::auto_ptr graph; - if (row_map.Comm().NumProc() > 1) - 
graph = std::auto_ptr - (new Epetra_CrsGraph (Copy, row_map, + if (input_row_map.Comm().NumProc() > 1) + graph = std::auto_ptr + (new Epetra_CrsGraph (Copy, input_row_map, &n_entries_per_row[input_row_map.MinMyGID()], true)); else - graph = std::auto_ptr - (new Epetra_CrsGraph (Copy, row_map, col_map, + graph = std::auto_ptr + (new Epetra_CrsGraph (Copy, input_row_map, input_col_map, &n_entries_per_row[input_row_map.MinMyGID()], true)); Assert (graph->NumGlobalRows() == (int)sparsity_pattern.n_rows(), @@ -477,23 +559,23 @@ namespace TrilinosWrappers std::vector row_indices; for (unsigned int row=0; rowInsertGlobalIndices (row, row_length, &row_indices[0]); } - graph->FillComplete(col_map, row_map); + graph->FillComplete(input_col_map, input_row_map); graph->OptimizeStorage(); Assert (graph->NumGlobalCols() == (int)sparsity_pattern.n_cols(), @@ -502,7 +584,7 @@ namespace TrilinosWrappers matrix = std::auto_ptr (new Epetra_FECrsMatrix(Copy, *graph, false)); - + last_action = Zero; compress(); } @@ -516,23 +598,10 @@ namespace TrilinosWrappers // sparsity pattern. matrix.reset(); - // model the communicator on the - // one used for the sparsity - // pattern - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator (sparsity_pattern.trilinos_communicator())); - - row_map = Utilities::Trilinos::duplicate_map (sparsity_pattern.range_partitioner(), - *communicator); - col_map = Utilities::Trilinos::duplicate_map (sparsity_pattern.domain_partitioner(), - *communicator); - - AssertThrow (sparsity_pattern.trilinos_sparsity_pattern().Filled() == true, - ExcMessage("The Trilinos sparsity pattern has not been compressed")); - + column_space_map = std::auto_ptr + (new Epetra_Map (sparsity_pattern.domain_partitioner())); matrix = std::auto_ptr - (new Epetra_FECrsMatrix(Copy, sparsity_pattern.trilinos_sparsity_pattern(), + (new Epetra_FECrsMatrix(Copy, sparsity_pattern.trilinos_sparsity_pattern(), false)); compress(); } @@ -544,13 +613,11 @@ namespace TrilinosWrappers { matrix.reset(); - row_map = Utilities::Trilinos::duplicate_map (sparse_matrix.range_partitioner(), - *communicator); - col_map = Utilities::Trilinos::duplicate_map (sparse_matrix.domain_partitioner(), - *communicator); + column_space_map = std::auto_ptr + (new Epetra_Map (sparse_matrix.domain_partitioner())); matrix = std::auto_ptr - (new Epetra_FECrsMatrix(Copy, sparse_matrix.trilinos_sparsity_pattern(), + (new Epetra_FECrsMatrix(Copy, sparse_matrix.trilinos_sparsity_pattern(), false)); compress(); @@ -582,7 +649,7 @@ namespace TrilinosWrappers const double drop_tolerance, const bool copy_values) { - reinit (input_map, input_map, dealii_sparse_matrix, drop_tolerance, + reinit (input_map, input_map, dealii_sparse_matrix, drop_tolerance, copy_values); } @@ -602,7 +669,7 @@ namespace TrilinosWrappers { // in case we do not copy values, just // call the other function. 
- reinit (input_row_map, input_col_map, + reinit (input_row_map, input_col_map, dealii_sparse_matrix.get_sparsity_pattern()); return; } @@ -616,18 +683,17 @@ namespace TrilinosWrappers ExcDimensionMismatch (input_col_map.NumGlobalElements(), dealii_sparse_matrix.n())); - row_map = Utilities::Trilinos::duplicate_map (input_row_map, *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, *communicator); + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); std::vector n_entries_per_row(n_rows); for (unsigned int row=0; row - (new Epetra_FECrsMatrix(Copy, row_map, - &n_entries_per_row[row_map.MinMyGID()], + (new Epetra_FECrsMatrix(Copy, input_row_map, + &n_entries_per_row[input_row_map.MinMyGID()], false)); std::vector values; @@ -638,9 +704,9 @@ namespace TrilinosWrappers values.resize (n_entries_per_row[row],0.); row_indices.resize (n_entries_per_row[row], numbers::invalid_unsigned_int); - + unsigned int index = 0; - for (typename ::dealii::SparseMatrix::const_iterator + for (typename ::dealii::SparseMatrix::const_iterator p = dealii_sparse_matrix.begin(row); p != dealii_sparse_matrix.end(row); ++p) if (std::fabs(p->value()) > drop_tolerance) @@ -658,7 +724,7 @@ namespace TrilinosWrappers - void + void SparseMatrix::reinit (const Epetra_CrsMatrix &input_matrix, const bool copy_values) { @@ -667,15 +733,15 @@ namespace TrilinosWrappers Assert (input_matrix.Filled()==true, ExcMessage("Input CrsMatrix has not called FillComplete()!")); - row_map = Utilities::Trilinos::duplicate_map (input_matrix.RangeMap(), *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_matrix.DomainMap(), *communicator); + column_space_map = std::auto_ptr + (new Epetra_Map (input_matrix.DomainMap())); const Epetra_CrsGraph *graph = &input_matrix.Graph(); - matrix = std::auto_ptr + matrix = std::auto_ptr (new Epetra_FECrsMatrix(Copy, *graph, false)); - matrix->FillComplete (col_map, row_map, true); + matrix->FillComplete (*column_space_map, input_matrix.RangeMap(), true); if (copy_values == true) { @@ -698,14 +764,12 @@ namespace TrilinosWrappers // the pointer and generate an // empty matrix. matrix.reset(); + column_space_map.reset(); - row_map = Epetra_Map (0, 0, - Utilities::Trilinos::comm_self()); - - col_map = row_map; - - matrix = std::auto_ptr - (new Epetra_FECrsMatrix(View, row_map, 0)); + column_space_map = std::auto_ptr + (new Epetra_Map (0, 0, Utilities::Trilinos::comm_self())); + matrix = std::auto_ptr + (new Epetra_FECrsMatrix(View, *column_space_map, 0)); matrix->FillComplete(); @@ -718,8 +782,7 @@ namespace TrilinosWrappers SparseMatrix::clear_row (const unsigned int row, const TrilinosScalar new_diag_value) { - Assert (matrix->Filled()==true, - ExcMessage("Matrix must be compressed before invoking clear_row.")); + Assert (matrix->Filled()==true, ExcMatrixNotCompressed()); // Only do this on the rows owned // locally on this processor. 
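// Since clear_row() now insists on a compressed matrix (ExcMatrixNotCompressed)
// instead of assembling on the fly, the intended call order is roughly the
// sketch below; the matrix object, row index and diagonal value are
// hypothetical placeholders.
TrilinosWrappers::SparseMatrix system_matrix;
// ... reinit the matrix and add entries ...
system_matrix.compress ();             // flush Trilinos buffers first
system_matrix.clear_row (0, 1.);       // zero row 0, keep 1. on the diagonal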
@@ -731,11 +794,11 @@ namespace TrilinosWrappers int num_entries; const int ierr = matrix->ExtractMyRowView(local_row, num_entries, values, col_indices); - + Assert (ierr == 0, ExcTrilinosError(ierr)); - - int* diag_find = std::find(col_indices,col_indices+num_entries, + + int* diag_find = std::find(col_indices,col_indices+num_entries, local_row); int diag_index = (int)(diag_find - col_indices); @@ -743,7 +806,7 @@ namespace TrilinosWrappers if (diag_index != j || new_diag_value == 0) values[j] = 0.; - if (diag_find && std::fabs(values[diag_index]) == 0.0 && + if (diag_find && std::fabs(values[diag_index]) == 0.0 && new_diag_value != 0.0) values[diag_index] = new_diag_value; } @@ -794,8 +857,7 @@ namespace TrilinosWrappers // Check whether the matrix has // already been transformed to local // indices. - if (matrix->Filled() == false) - matrix->GlobalAssemble(col_map, row_map, true); + Assert (matrix->Filled(), ExcMatrixNotCompressed()); // Prepare pointers for extraction // of a view of the row. @@ -817,7 +879,7 @@ namespace TrilinosWrappers // Search the index where we // look for the value, and then // finally get it. - + int* el_find = std::find(col_indices, col_indices + nnz_present, trilinos_j); @@ -869,11 +931,10 @@ namespace TrilinosWrappers } else { - // Check whether the matrix + // Check whether the matrix // already is transformed to // local indices. - if (!matrix->Filled()) - matrix->GlobalAssemble(col_map, row_map, true); + Assert (matrix->Filled(), ExcMatrixNotCompressed()); // Prepare pointers for extraction // of a view of the row. @@ -957,7 +1018,7 @@ namespace TrilinosWrappers - namespace internals + namespace internals { void perform_mmult (const SparseMatrix &inputleft, const SparseMatrix &inputright, @@ -968,14 +1029,14 @@ namespace TrilinosWrappers const bool use_vector = V.size() == inputright.m() ? 
true : false; if (transpose_left == false) { - Assert (inputleft.n() == inputright.m(), + Assert (inputleft.n() == inputright.m(), ExcDimensionMismatch(inputleft.n(), inputright.m())); Assert (inputleft.domain_partitioner().SameAs(inputright.range_partitioner()), ExcMessage ("Parallel partitioning of A and B does not fit.")); } else { - Assert (inputleft.m() == inputright.m(), + Assert (inputleft.m() == inputright.m(), ExcDimensionMismatch(inputleft.m(), inputright.m())); Assert (inputleft.range_partitioner().SameAs(inputright.range_partitioner()), ExcMessage ("Parallel partitioning of A and B does not fit.")); @@ -997,10 +1058,10 @@ namespace TrilinosWrappers } else { - mod_B = Teuchos::rcp(new Epetra_CrsMatrix (Copy, + mod_B = Teuchos::rcp(new Epetra_CrsMatrix (Copy, inputright.trilinos_sparsity_pattern()), true); - mod_B->FillComplete(inputright.domain_partitioner(), + mod_B->FillComplete(inputright.domain_partitioner(), inputright.range_partitioner()); Assert (inputright.local_range() == V.local_range(), ExcMessage ("Parallel distribution of matrix B and vector V " @@ -1052,7 +1113,7 @@ namespace TrilinosWrappers // import data if necessary ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr; - ML_CommInfoOP *getrow_comm; + ML_CommInfoOP *getrow_comm; int max_per_proc; int N_input_vector = B_->invec_leng; getrow_comm = B_->getrow->pre_comm; @@ -1065,7 +1126,7 @@ namespace TrilinosWrappers ML_create_unique_col_id(N_input_vector, &(B_->getrow->loc_glob_map), getrow_comm, &max_per_proc, B_->comm); B_->getrow->use_loc_glob_map = ML_YES; - if (A_->getrow->pre_comm != NULL) + if (A_->getrow->pre_comm != NULL) ML_exchange_rows( B_, &Btmp, A_->getrow->pre_comm); else Btmp = B_; @@ -1077,10 +1138,10 @@ namespace TrilinosWrappers ML_free(B_->getrow->loc_glob_map); B_->getrow->loc_glob_map = NULL; B_->getrow->use_loc_glob_map = ML_NO; - if (A_->getrow->pre_comm != NULL) + if (A_->getrow->pre_comm != NULL) { tptr = Btmp; - while ( (tptr!= NULL) && (tptr->sub_matrix != B_)) + while ( (tptr!= NULL) && (tptr->sub_matrix != B_)) tptr = tptr->sub_matrix; if (tptr != NULL) tptr->sub_matrix = NULL; ML_RECUR_CSR_MSRdata_Destroy(Btmp); @@ -1090,7 +1151,7 @@ namespace TrilinosWrappers // make correct data structures if (A_->getrow->post_comm != NULL) ML_exchange_rows(Ctmp, &Ctmp2, A_->getrow->post_comm); - else + else Ctmp2 = Ctmp; ML_back_to_csrlocal(Ctmp2, C_, max_per_proc); @@ -1098,7 +1159,7 @@ namespace TrilinosWrappers ML_RECUR_CSR_MSRdata_Destroy (Ctmp); ML_Operator_Destroy (&Ctmp); - if (A_->getrow->post_comm != NULL) + if (A_->getrow->post_comm != NULL) { ML_RECUR_CSR_MSRdata_Destroy(Ctmp2); ML_Operator_Destroy (&Ctmp2); @@ -1149,7 +1210,7 @@ namespace TrilinosWrappers { Assert (rhs.m() == m(), ExcDimensionMismatch (rhs.m(), m())); Assert (rhs.n() == n(), ExcDimensionMismatch (rhs.n(), n())); - + const std::pair local_range = rhs.local_range(); @@ -1166,14 +1227,14 @@ namespace TrilinosWrappers // data on both matrices and simply // manipulate the values that are // addressed by the pointers. 
- if (matrix->Filled() == true && + if (matrix->Filled() == true && rhs.matrix->Filled() == true && - this->local_range() == local_range && + this->local_range() == local_range && matrix->NumMyNonzeros() == rhs.matrix->NumMyNonzeros()) for (unsigned int row=local_range.first; row < local_range.second; ++row) { - Assert (matrix->NumGlobalEntries(row) == + Assert (matrix->NumGlobalEntries(row) == rhs.matrix->NumGlobalEntries(row), ExcDimensionMismatch(matrix->NumGlobalEntries(row), rhs.matrix->NumGlobalEntries(row))); @@ -1192,7 +1253,7 @@ namespace TrilinosWrappers // just working with the values. #ifdef DEBUG int *index_ptr, *rhs_index_ptr; - ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries, + ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries, rhs_value_ptr, rhs_index_ptr); Assert (ierr == 0, ExcTrilinosError(ierr)); @@ -1223,7 +1284,7 @@ namespace TrilinosWrappers // data, multiply it by the factor and // then add it to the matrix using the // respective add() function. - else + else { unsigned int max_row_length = 0; for (unsigned int row=local_range.first; @@ -1231,10 +1292,10 @@ namespace TrilinosWrappers max_row_length = std::max (max_row_length, static_cast(rhs.matrix->NumGlobalEntries(row))); - + std::vector column_indices (max_row_length); std::vector values (max_row_length); - + if (matrix->Filled() == true && rhs.matrix->Filled() == true && this->local_range() == local_range) for (unsigned int row=local_range.first; @@ -1264,14 +1325,14 @@ namespace TrilinosWrappers row < local_range.second; ++row) { int n_entries; - ierr = rhs.matrix->Epetra_CrsMatrix::ExtractGlobalRowCopy + ierr = rhs.matrix->Epetra_CrsMatrix::ExtractGlobalRowCopy ((int)row, max_row_length, n_entries, &values[0], &column_indices[0]); Assert (ierr == 0, ExcTrilinosError(ierr)); for (int i=0; iEpetra_CrsMatrix::SumIntoGlobalValues + ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues ((int)row, n_entries, &values[0], &column_indices[0]); Assert (ierr == 0, ExcTrilinosError(ierr)); } @@ -1280,11 +1341,11 @@ namespace TrilinosWrappers } } } - + void - SparseMatrix::transpose () + SparseMatrix::transpose () { // This only flips a flag that tells // Trilinos that any vmult operation @@ -1311,11 +1372,11 @@ namespace TrilinosWrappers SparseMatrix::is_symmetric (const double tolerance) const { (void)tolerance; - + Assert (false, ExcNotImplemented()); return false; - } + } @@ -1324,7 +1385,7 @@ namespace TrilinosWrappers { Assert (false, ExcNotImplemented()); return false; - } + } @@ -1340,20 +1401,26 @@ namespace TrilinosWrappers // ouput is generated in case of // multiple processors. 
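// The print() variant redefined just below takes a second flag: if it is true,
// the full Epetra object (including map and storage information) is streamed,
// otherwise the plain (row,column) value listing is kept. A hypothetical call,
// with the matrix object as a placeholder:
TrilinosWrappers::SparseMatrix A;
// ... assemble A ...
A.print (std::cout, false);            // entry list, as before
A.print (std::cout, true);             // let Trilinos print map/storage details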
void - SparseMatrix::print (std::ostream &out) const + SparseMatrix::print (std::ostream &out, + const bool print_detailed_trilinos_information) const { - double * values; - int * indices; - int num_entries; - - for (int i=0; iNumMyRows(); ++i) + if (print_detailed_trilinos_information == true) + out << *matrix; + else { - matrix->ExtractMyRowView (i, num_entries, values, indices); - for (int j=0; jGRID(j)] << ") " - << values[j] << std::endl; + double * values; + int * indices; + int num_entries; + + for (int i=0; iNumMyRows(); ++i) + { + matrix->ExtractMyRowView (i, num_entries, values, indices); + for (int j=0; jGRID(j)] << ") " + << values[j] << std::endl; + } } - + AssertThrow (out, ExcIO()); } @@ -1373,30 +1440,32 @@ namespace TrilinosWrappers template void SparseMatrix::reinit (const Epetra_Map &, - const dealii::SparsityPattern &); + const dealii::SparsityPattern &, + const bool); template void SparseMatrix::reinit (const Epetra_Map &, - const CompressedSparsityPattern &); + const CompressedSparsityPattern &, + const bool); template void SparseMatrix::reinit (const Epetra_Map &, - const CompressedSetSparsityPattern &); + const CompressedSetSparsityPattern &, + const bool); template void SparseMatrix::reinit (const Epetra_Map &, - const CompressedSimpleSparsityPattern &); + const CompressedSimpleSparsityPattern &, + const bool); template void SparseMatrix::reinit (const Epetra_Map &, const Epetra_Map &, - const dealii::SparsityPattern &); - template void - SparseMatrix::reinit (const Epetra_Map &, - const Epetra_Map &, - const CompressedSparsityPattern &); + const dealii::SparsityPattern &, + const bool); template void SparseMatrix::reinit (const Epetra_Map &, const Epetra_Map &, - const CompressedSimpleSparsityPattern &); + const CompressedSparsityPattern &, + const bool); template void SparseMatrix::reinit (const dealii::SparseMatrix &, diff --git a/deal.II/lac/source/trilinos_sparsity_pattern.cc b/deal.II/lac/source/trilinos_sparsity_pattern.cc index 71a4ffe94e..82d6ac235f 100644 --- a/deal.II/lac/source/trilinos_sparsity_pattern.cc +++ b/deal.II/lac/source/trilinos_sparsity_pattern.cc @@ -11,15 +11,17 @@ // //--------------------------------------------------------------------------- -#include + #include -#include -#include -#include -#include #ifdef DEAL_II_USE_TRILINOS +# include +# include +# include +# include +# include + DEAL_II_NAMESPACE_OPEN namespace TrilinosWrappers @@ -40,7 +42,7 @@ namespace TrilinosWrappers return; } - + // otherwise first flush Trilinos caches sparsity_pattern->compress (); @@ -48,11 +50,11 @@ namespace TrilinosWrappers // row int ncols; int colnums = sparsity_pattern->n_cols(); - + int ierr; - ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((int)this->a_row, + ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((int)this->a_row, colnums, - ncols, + ncols, (int*)&(*colnum_cache)[0]); AssertThrow (ierr == 0, ExcTrilinosError(ierr)); @@ -82,184 +84,91 @@ namespace TrilinosWrappers // interface. 
SparsityPattern::SparsityPattern () : - communicator (Utilities::Trilinos::duplicate_communicator - (Utilities::Trilinos::comm_self())), - row_map (0, 0, *communicator), - col_map (0, 0, *communicator), - compressed (true), - graph (std::auto_ptr - (new Epetra_FECrsGraph(View, row_map, col_map, 0))) + compressed (true) { + column_space_map = std::auto_ptr + (new Epetra_Map (0, 0, Utilities::Trilinos::comm_self())); + graph = std::auto_ptr + (new Epetra_FECrsGraph(View, *column_space_map, *column_space_map, 0)); graph->FillComplete(); } - - SparsityPattern::SparsityPattern (const Epetra_Map &InputMap, + + SparsityPattern::SparsityPattern (const Epetra_Map &input_map, const unsigned int n_entries_per_row) - : - communicator (Utilities::Trilinos::duplicate_communicator (InputMap.Comm())), - row_map (Utilities::Trilinos::duplicate_map (InputMap, *communicator)), - col_map (row_map), - compressed (false), - // for more than one processor, need to - // specify only row map first and let the - // matrix entries decide about the column - // map (which says which columns are - // present in the matrix, not to be - // confused with the col_map that tells - // how the domain dofs of the matrix will - // be distributed). for only one - // processor, we can directly assign the - // columns as well. - graph (row_map.Comm().NumProc() > 1 ? - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - (int)n_entries_per_row, - false))) - : - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - (int)n_entries_per_row, - false))) - ) - {} - - SparsityPattern::SparsityPattern (const Epetra_Map &InputMap, + { + reinit (input_map, input_map, n_entries_per_row); + } + + + + SparsityPattern::SparsityPattern (const Epetra_Map &input_map, const std::vector &n_entries_per_row) - : - communicator (Utilities::Trilinos:: - duplicate_communicator (InputMap.Comm())), - row_map (Utilities::Trilinos::duplicate_map (InputMap, *communicator)), - col_map (row_map), - compressed (false), - graph (row_map.Comm().NumProc() > 1 ? - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - (int*)const_cast - (&(n_entries_per_row[row_map.MinMyGID()])), - false))) - : - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - (int*)const_cast - (&(n_entries_per_row[row_map.MinMyGID()])), - false))) - ) - {} + { + reinit (input_map, input_map, n_entries_per_row); + } + - SparsityPattern::SparsityPattern (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + + SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, const unsigned int n_entries_per_row) - : - communicator (Utilities::Trilinos:: - duplicate_communicator (InputRowMap.Comm())), - row_map (Utilities::Trilinos:: - duplicate_map (InputRowMap, *communicator)), - col_map (Utilities::Trilinos:: - duplicate_map (InputColMap, *communicator)), - compressed (false), - graph (row_map.Comm().NumProc() > 1 ? 
- (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - (int)n_entries_per_row, - false))) - : - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - (int)n_entries_per_row, - false))) - ) - {} + { + reinit (input_row_map, input_col_map, n_entries_per_row); + } + + - SparsityPattern::SparsityPattern (const Epetra_Map &InputRowMap, - const Epetra_Map &InputColMap, + SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, const std::vector &n_entries_per_row) - : - communicator (Utilities::Trilinos:: - duplicate_communicator (InputRowMap.Comm())), - row_map (Utilities::Trilinos:: - duplicate_map (InputRowMap, *communicator)), - col_map (Utilities::Trilinos:: - duplicate_map (InputColMap, *communicator)), - compressed (false), - graph (row_map.Comm().NumProc() > 1 ? - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - (int*)const_cast - (&(n_entries_per_row[row_map.MinMyGID()])), - false))) - : - (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - (int*)const_cast - (&(n_entries_per_row[row_map.MinMyGID()])), - false))) - ) - {} + { + reinit (input_row_map, input_col_map, n_entries_per_row); + } + + SparsityPattern::SparsityPattern (const unsigned int m, const unsigned int n, const unsigned int n_entries_per_row) - : - communicator (Utilities::Trilinos::duplicate_communicator - (Utilities::Trilinos::comm_self())), - row_map (m, 0, *communicator), - col_map (n, 0, *communicator), - compressed (false), - graph (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - int(n_entries_per_row), false))) - {} + { + reinit (m, n, n_entries_per_row); + } + + - SparsityPattern::SparsityPattern (const unsigned int m, const unsigned int n, const std::vector &n_entries_per_row) - : - communicator (Utilities::Trilinos::duplicate_communicator - (Utilities::Trilinos::comm_self())), - row_map (m, 0, *communicator), - col_map (n, 0, *communicator), - compressed (false), - graph (std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - (int*)const_cast(&(n_entries_per_row[0])), - false))) - {} + { + reinit (m, n, n_entries_per_row); + } + - // Copy function is currently not working - // because the Trilinos Epetra_FECrsGraph - // does not implement a constructor from - // another graph. - /* - SparsityPattern::SparsityPattern (const SparsityPattern &InputSP) + // Copy function only works if the + // sparsity pattern is empty. + SparsityPattern::SparsityPattern (const SparsityPattern &input_sparsity) : Subscriptor(), - row_map (InputSP.row_map), - col_map (InputSP.col_map), - compressed (false), - graph (std::auto_ptr - (new Epetra_FECrsGraph(*InputSP.graph))) - {} - */ + column_space_map (std::auto_ptr + (new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))), + compressed (false), + graph (std::auto_ptr + (new Epetra_FECrsGraph(View, *column_space_map, + *column_space_map, 0))) + { + Assert (input_sparsity.n_rows() == 0, + ExcMessage ("Copy constructor only works for empty sparsity patterns.")); + } SparsityPattern::~SparsityPattern () - { -// this is sorta tricky. 
we can't destroy the communicator here -// if we have initialized a matrix with it because the matrix -// keeps a reference count to the sparsity pattern and so -// the communicator has to stay alive even though the sparsity -// pattern goes out of scope :-( -// -// TODO: find a way to fix this -// Utilities::Trilinos::destroy_communicator (*communicator); - } + {} - void + void SparsityPattern::reinit (const Epetra_Map &input_map, const unsigned int n_entries_per_row) { @@ -267,7 +176,7 @@ namespace TrilinosWrappers } - void + void SparsityPattern::reinit (const unsigned int m, const unsigned int n, const unsigned int n_entries_per_row) @@ -279,22 +188,15 @@ namespace TrilinosWrappers } - void + void SparsityPattern::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, const unsigned int n_entries_per_row) { - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator (input_row_map.Comm())); - - row_map = Utilities::Trilinos::duplicate_map (input_row_map, - *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, - *communicator); - + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); graph.reset(); - + compressed = false; + // for more than one processor, need to // specify only row map first and let the // matrix entries decide about the column @@ -305,17 +207,18 @@ namespace TrilinosWrappers // be distributed). for only one // processor, we can directly assign the // columns as well. - if (row_map.Comm().NumProc() > 1) + if (input_row_map.Comm().NumProc() > 1) graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, n_entries_per_row, false)); + (new Epetra_FECrsGraph(Copy, input_row_map, n_entries_per_row, false)); else graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, n_entries_per_row, false)); + (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map, + n_entries_per_row, false)); } - void + void SparsityPattern::reinit (const Epetra_Map &input_map, const std::vector &n_entries_per_row) { @@ -324,7 +227,7 @@ namespace TrilinosWrappers - void + void SparsityPattern::reinit (const unsigned int m, const unsigned int n, const std::vector &n_entries_per_row) @@ -337,104 +240,98 @@ namespace TrilinosWrappers - void + void SparsityPattern::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, const std::vector &n_entries_per_row) { - Assert (n_entries_per_row.size() == + Assert (n_entries_per_row.size() == static_cast(input_row_map.NumGlobalElements()), ExcDimensionMismatch (n_entries_per_row.size(), input_row_map.NumGlobalElements())); - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator (input_row_map.Comm())); - - row_map = Utilities::Trilinos::duplicate_map (input_row_map, - *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, - *communicator); - + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); graph.reset(); + compressed = false; - if (row_map.Comm().NumProc() > 1) + if (input_row_map.Comm().NumProc() > 1) graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - n_entries_per_row[input_row_map.MinMyGID()], + (new Epetra_FECrsGraph(Copy, input_row_map, + n_entries_per_row[input_row_map.MinMyGID()], false)); else graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - n_entries_per_row[input_row_map.MinMyGID()], - false)); + (new Epetra_FECrsGraph(Copy, input_row_map, 
input_col_map, + n_entries_per_row[input_row_map.MinMyGID()], + false)); } template - void + void SparsityPattern::reinit (const Epetra_Map &input_map, - const SparsityType &sp) + const SparsityType &sp, + const bool exchange_data) { - reinit (input_map, input_map, sp); + reinit (input_map, input_map, sp, exchange_data); } template - void + void SparsityPattern::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, - const SparsityType &sp) + const SparsityType &sp, + const bool exchange_data) { - Assert (sp.n_rows() == + Assert (sp.n_rows() == static_cast(input_row_map.NumGlobalElements()), ExcDimensionMismatch (sp.n_rows(), input_row_map.NumGlobalElements())); - Assert (sp.n_cols() == + Assert (sp.n_cols() == static_cast(input_col_map.NumGlobalElements()), ExcDimensionMismatch (sp.n_cols(), input_col_map.NumGlobalElements())); + Assert (exchange_data == false, ExcNotImplemented()); - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator (input_row_map.Comm())); - - row_map = Utilities::Trilinos::duplicate_map (input_row_map, - *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, - *communicator); - + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); graph.reset(); + compressed = false; const unsigned int n_rows = sp.n_rows(); - std::vector n_entries_per_row(n_rows); + Assert (input_row_map.LinearMap() == true, + ExcMessage ("This function is not efficient if the map is not contiguous.")); - for (unsigned int row=0; row n_entries_per_row(input_row_map.MaxMyGID()- + input_row_map.MinMyGID() + 1); + + for (unsigned int row=input_row_map.MinMyGID(); + row(input_row_map.MaxMyGID()+1); + ++row) + n_entries_per_row[row-input_row_map.MinMyGID()] = sp.row_length(row); - if (row_map.Comm().NumProc() > 1) + if (input_row_map.Comm().NumProc() > 1) graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - n_entries_per_row[input_row_map.MinMyGID()], + (new Epetra_FECrsGraph(Copy, input_row_map, + n_entries_per_row[0], false)); else graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - n_entries_per_row[input_row_map.MinMyGID()], - false)); + (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map, + n_entries_per_row[0], + false)); Assert (graph->NumGlobalRows() == (int)sp.n_rows(), ExcDimensionMismatch (graph->NumGlobalRows(), sp.n_rows())); - std::vector row_indices; - + for (unsigned int row=0; rowEpetra_CrsGraph::InsertGlobalIndices (row, row_length, + graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length, &row_indices[0]); } @@ -451,13 +348,14 @@ namespace TrilinosWrappers - template<> - void + template <> + void SparsityPattern::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, - const CompressedSetSparsityPattern &sp) + const CompressedSimpleSparsityPattern &sp, + const bool exchange_data) { - Assert (sp.n_rows() == + Assert (sp.n_rows() == static_cast(input_row_map.NumGlobalElements()), ExcDimensionMismatch (sp.n_rows(), input_row_map.NumGlobalElements())); @@ -466,34 +364,113 @@ namespace TrilinosWrappers ExcDimensionMismatch (sp.n_cols(), input_col_map.NumGlobalElements())); - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator (input_row_map.Comm())); + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); + graph.reset(); + compressed = false; + + Assert (input_row_map.LinearMap() == true, + ExcMessage ("This 
function is not efficient if the map is not contiguous.")); + + std::vector n_entries_per_row(input_row_map.MaxMyGID()- + input_row_map.MinMyGID() + 1); - row_map = Utilities::Trilinos::duplicate_map (input_row_map, - *communicator); - col_map = Utilities::Trilinos::duplicate_map (input_col_map, - *communicator); + for (unsigned int row=input_row_map.MinMyGID(); + row(input_row_map.MaxMyGID()+1); + ++row) + n_entries_per_row[row-input_row_map.MinMyGID()] = sp.row_length(row); + + if (input_row_map.Comm().NumProc() > 1) + graph = std::auto_ptr + (new Epetra_FECrsGraph(Copy, input_row_map, + n_entries_per_row[0], + false)); + else + graph = std::auto_ptr + (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map, + n_entries_per_row[0], + false)); + + Assert (graph->NumGlobalRows() == (int)sp.n_rows(), + ExcDimensionMismatch (graph->NumGlobalRows(), + sp.n_rows())); + + const unsigned int n_rows = sp.n_rows(); + std::vector row_indices; + + // Include possibility to exchange data + // since CompressedSimpleSparsityPattern is + // able to do so + for (unsigned int row=0; rowEpetra_CrsGraph::InsertGlobalIndices (row, row_length, + &row_indices[0]); + } + else if ( exchange_data && sp.row_index_set().is_element(row) ) + { + const int row_length = sp.row_length(row); + row_indices.resize (row_length, -1); + + for (int col=0; col < row_length; ++col) + row_indices[col] = sp.column_number (row, col); + + graph->InsertGlobalIndices (1, (int*)&row, row_length, &row_indices[0]); + } + + compress(); + } + + + template<> + void + SparsityPattern::reinit (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, + const CompressedSetSparsityPattern &sp, + const bool exchange_data) + { + Assert (exchange_data == false, ExcNotImplemented()); + Assert (sp.n_rows() == + static_cast(input_row_map.NumGlobalElements()), + ExcDimensionMismatch (sp.n_rows(), + input_row_map.NumGlobalElements())); + Assert (sp.n_cols() == + static_cast(input_col_map.NumGlobalElements()), + ExcDimensionMismatch (sp.n_cols(), + input_col_map.NumGlobalElements())); + + column_space_map = std::auto_ptr (new Epetra_Map (input_col_map)); graph.reset(); + compressed = false; const unsigned int n_rows = sp.n_rows(); - std::vector n_entries_per_row(n_rows); + std::vector n_entries_per_row(input_row_map.MaxMyGID()- + input_row_map.MinMyGID() + 1); + for (unsigned int row=input_row_map.MinMyGID(); + row(input_row_map.MaxMyGID()+1); + ++row) + { + n_entries_per_row[row-input_row_map.MinMyGID()] = sp.row_length(row); + } - for (unsigned int row=0; row 1) + if (input_row_map.Comm().NumProc() > 1) graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, - n_entries_per_row[input_row_map.MinMyGID()], + (new Epetra_FECrsGraph(Copy, input_row_map, + n_entries_per_row[0], false)); else graph = std::auto_ptr - (new Epetra_FECrsGraph(Copy, row_map, col_map, - n_entries_per_row[input_row_map.MinMyGID()], - false)); + (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map, + n_entries_per_row[0], + false)); Assert (graph->NumGlobalRows() == (int)sp.n_rows(), ExcDimensionMismatch (graph->NumGlobalRows(), @@ -501,22 +478,27 @@ namespace TrilinosWrappers std::vector row_indices; - - for (unsigned int row=0; row(input_row_map.MaxMyGID()+1); + ++row) + if (exchange_data || input_row_map.MyGID(row)) { const int row_length = sp.row_length(row); + if (row_length == 0) + continue; + row_indices.resize (row_length, -1); - CompressedSetSparsityPattern::row_iterator col_num = + CompressedSetSparsityPattern::row_iterator col_num = sp.row_begin 
(row); - for (unsigned int col = 0; - col_num != sp.row_end (row); + for (unsigned int col = 0; + col_num != sp.row_end (row); ++col_num, ++col) row_indices[col] = *col_num; - graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length, + graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length, &row_indices[0]); } @@ -531,10 +513,10 @@ namespace TrilinosWrappers Assert (false, ExcNotImplemented()); } - + template - void + void SparsityPattern::copy_from (const SparsityType &sp) { const Epetra_Map rows (sp.n_rows(), 0, Utilities::Trilinos::comm_self()); @@ -550,15 +532,14 @@ namespace TrilinosWrappers { // When we clear the matrix, reset // the pointer and generate an - // empty matrix. + // empty sparsity pattern. + column_space_map.reset(); graph.reset(); - - row_map = Epetra_Map (0, 0, Utilities::Trilinos::comm_self()); - col_map = row_map; - - graph = std::auto_ptr - (new Epetra_FECrsGraph(View, row_map, 0)); + column_space_map = std::auto_ptr + (new Epetra_Map (0, 0, Utilities::Trilinos::comm_self())); + graph = std::auto_ptr + (new Epetra_FECrsGraph(View, *column_space_map, *column_space_map, 0)); graph->FillComplete(); compressed = true; @@ -570,8 +551,11 @@ namespace TrilinosWrappers SparsityPattern::compress () { int ierr; - ierr = graph->GlobalAssemble (col_map, row_map, true); - + Assert (&* column_space_map != 0, ExcInternalError()); + ierr = graph->GlobalAssemble (*column_space_map, + static_cast(graph->RangeMap()), + true); + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); ierr = graph->OptimizeStorage (); @@ -602,7 +586,7 @@ namespace TrilinosWrappers } else { - // Check whether the matrix + // Check whether the matrix // already is transformed to // local indices. if (graph->Filled() == false) @@ -701,7 +685,7 @@ namespace TrilinosWrappers if (graph->Filled() == true) n_cols = graph -> NumGlobalCols(); else - n_cols = col_map.NumGlobalElements(); + n_cols = column_space_map->NumGlobalElements(); return n_cols; } @@ -724,7 +708,7 @@ namespace TrilinosWrappers unsigned int begin, end; begin = graph -> RowMap().MinMyGID(); end = graph -> RowMap().MaxMyGID()+1; - + return std::make_pair (begin, end); } @@ -783,19 +767,25 @@ namespace TrilinosWrappers // ouput is generated in case of // multiple processors. 
void - SparsityPattern::print (std::ostream &out) const + SparsityPattern::print (std::ostream &out, + const bool write_extended_trilinos_info) const { - int * indices; - int num_entries; - - for (int i=0; iNumMyRows(); ++i) + if (write_extended_trilinos_info) + out << *graph; + else { - graph->ExtractMyRowView (i, num_entries, indices); - for (int j=0; jGRID(j)] << ") " - << std::endl; + int * indices; + int num_entries; + + for (int i=0; iNumMyRows(); ++i) + { + graph->ExtractMyRowView (i, num_entries, indices); + for (int j=0; jGRID(j)] << ") " + << std::endl; + } } - + AssertThrow (out, ExcIO()); } @@ -803,7 +793,7 @@ namespace TrilinosWrappers void SparsityPattern::print_gnuplot (std::ostream &out) const - { + { Assert (graph->Filled() == true, ExcInternalError()); for (unsigned int row=0; rowGRID(j)] << " " << -static_cast(row) + out << indices[graph->GRID(j)] << " " << -static_cast(row) << std::endl; - } + } AssertThrow (out, ExcIO()); } @@ -841,30 +831,32 @@ namespace TrilinosWrappers template void SparsityPattern::reinit (const Epetra_Map &, - const dealii::SparsityPattern &); + const dealii::SparsityPattern &, + bool); template void SparsityPattern::reinit (const Epetra_Map &, - const dealii::CompressedSparsityPattern &); + const dealii::CompressedSparsityPattern &, + bool); template void SparsityPattern::reinit (const Epetra_Map &, - const dealii::CompressedSetSparsityPattern &); + const dealii::CompressedSetSparsityPattern &, + bool); template void SparsityPattern::reinit (const Epetra_Map &, - const dealii::CompressedSimpleSparsityPattern &); + const dealii::CompressedSimpleSparsityPattern &, + bool); template void SparsityPattern::reinit (const Epetra_Map &, const Epetra_Map &, - const dealii::SparsityPattern &); - template void - SparsityPattern::reinit (const Epetra_Map &, - const Epetra_Map &, - const dealii::CompressedSparsityPattern &); + const dealii::SparsityPattern &, + bool); template void SparsityPattern::reinit (const Epetra_Map &, const Epetra_Map &, - const dealii::CompressedSimpleSparsityPattern &); + const dealii::CompressedSparsityPattern &, + bool); } diff --git a/deal.II/lac/source/trilinos_vector.cc b/deal.II/lac/source/trilinos_vector.cc index f984ce003f..01b05c724b 100644 --- a/deal.II/lac/source/trilinos_vector.cc +++ b/deal.II/lac/source/trilinos_vector.cc @@ -12,14 +12,13 @@ //--------------------------------------------------------------------------- -#include #include -#include - #ifdef DEAL_II_USE_TRILINOS -#include +# include +# include +# include DEAL_II_NAMESPACE_OPEN @@ -31,37 +30,32 @@ namespace TrilinosWrappers Vector::Vector () - : - communicator (Utilities::Trilinos:: - duplicate_communicator(Utilities::Trilinos::comm_self())), - map (0, 0, *communicator) { last_action = Zero; - vector = std::auto_ptr (new Epetra_FEVector(map)); + vector = std::auto_ptr + (new Epetra_FEVector(Epetra_Map(0,0,0,Utilities::Trilinos::comm_self()))); } - Vector::Vector (const Epetra_Map &InputMap) - : - communicator (Utilities::Trilinos:: - duplicate_communicator (InputMap.Comm())), - map (Utilities::Trilinos:: - duplicate_map (InputMap, *communicator)) + Vector::Vector (const Epetra_Map &input_map) { - last_action = Zero; - vector = std::auto_ptr (new Epetra_FEVector(map)); + reinit (input_map); + } + + + + Vector::Vector (const IndexSet ¶llel_partitioner, + const MPI_Comm &communicator) + { + reinit (parallel_partitioner, communicator); } Vector::Vector (const Vector &v) : - VectorBase(), - communicator (Utilities::Trilinos:: - duplicate_communicator 
(*v.communicator)), - map (Utilities::Trilinos::duplicate_map (v.map, - *communicator)) + VectorBase() { last_action = Zero; vector = std::auto_ptr (new Epetra_FEVector(*v.vector)); @@ -69,36 +63,50 @@ namespace TrilinosWrappers - Vector::Vector (const Epetra_Map &InputMap, + Vector::Vector (const Epetra_Map &input_map, const VectorBase &v) : - VectorBase(), - communicator (Utilities::Trilinos:: - duplicate_communicator (InputMap.Comm())), - map (Utilities::Trilinos:: - duplicate_map (InputMap, *communicator)) + VectorBase() { - AssertThrow (map.NumGlobalElements() == v.vector->Map().NumGlobalElements(), - ExcDimensionMismatch (map.NumGlobalElements(), + AssertThrow (input_map.NumGlobalElements() == v.vector->Map().NumGlobalElements(), + ExcDimensionMismatch (input_map.NumGlobalElements(), v.vector->Map().NumGlobalElements())); last_action = Zero; - if (map.SameAs(v.vector->Map()) == true) + if (input_map.SameAs(v.vector->Map()) == true) vector = std::auto_ptr (new Epetra_FEVector(*v.vector)); else { - vector = std::auto_ptr (new Epetra_FEVector(map)); + vector = std::auto_ptr (new Epetra_FEVector(input_map)); reinit (v, false, true); } } - Vector::~Vector () + Vector::Vector (const IndexSet ¶llel_partitioner, + const VectorBase &v, + const MPI_Comm &communicator) + : + VectorBase() { - Utilities::Trilinos::destroy_communicator (*communicator); + AssertThrow ((int)parallel_partitioner.size() == v.vector->Map().NumGlobalElements(), + ExcDimensionMismatch (parallel_partitioner.size(), + v.vector->Map().NumGlobalElements())); + + last_action = Zero; + + vector = std::auto_ptr + (new Epetra_FEVector(parallel_partitioner.make_trilinos_map(communicator, + true))); + reinit (v, false, true); } + + + + Vector::~Vector () + {} @@ -106,24 +114,28 @@ namespace TrilinosWrappers Vector::reinit (const Epetra_Map &input_map, const bool fast) { - vector.reset(); - - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator(input_map.Comm())); - map = Utilities::Trilinos::duplicate_map (input_map, *communicator); - - - vector = std::auto_ptr (new Epetra_FEVector(map)); - - if (fast == false) + if (!vector->Map().SameAs(input_map)) + vector = std::auto_ptr (new Epetra_FEVector(input_map)); + else if (fast == false) { const int ierr = vector->PutScalar(0.); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + Assert (ierr == 0, ExcTrilinosError(ierr)); } last_action = Zero; } + + + + void + Vector::reinit (const IndexSet ¶llel_partitioner, + const MPI_Comm &communicator, + const bool fast) + { + Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator, + true); + reinit (map, fast); + } @@ -144,14 +156,9 @@ namespace TrilinosWrappers if (vector->Map().SameAs(v.vector->Map()) == false) { vector.reset(); - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator(v.trilinos_vector() - .Map().Comm())); - map = Utilities::Trilinos:: - duplicate_map (v.trilinos_vector().Map(), *communicator); - - vector = std::auto_ptr (new Epetra_FEVector(map)); + + vector = std::auto_ptr + (new Epetra_FEVector(v.vector->Map())); last_action = Zero; } else if (fast == false) @@ -238,15 +245,6 @@ namespace TrilinosWrappers else { vector.reset(); - - Utilities::Trilinos::destroy_communicator (*communicator); - communicator.reset (Utilities::Trilinos:: - duplicate_communicator(v.trilinos_vector(). 
- Map().Comm())); - map = Utilities::Trilinos:: - duplicate_map (v.trilinos_vector().Map(), - *communicator); - vector = std::auto_ptr (new Epetra_FEVector(*v.vector)); last_action = Zero; @@ -287,7 +285,7 @@ namespace TrilinosWrappers if (vector->Map().SameAs(m.col_partitioner()) == false) { - map = m.col_partitioner(); + Epetra_Map map = m.col_partitioner(); vector = std::auto_ptr (new Epetra_FEVector(map)); } @@ -305,43 +303,40 @@ namespace TrilinosWrappers Vector::Vector () - : - map (0, 0, - Utilities::Trilinos::comm_self()) { last_action = Zero; + Epetra_LocalMap map (0, 0, Utilities::Trilinos::comm_self()); vector = std::auto_ptr (new Epetra_FEVector(map)); } Vector::Vector (const unsigned int n) - : - map ((int)n, 0, - Utilities::Trilinos::comm_self()) { last_action = Zero; + Epetra_LocalMap map ((int)n, 0, Utilities::Trilinos::comm_self()); vector = std::auto_ptr (new Epetra_FEVector (map)); } - Vector::Vector (const Epetra_Map &InputMap) - : - map (InputMap.NumGlobalElements(), InputMap.IndexBase(), - InputMap.Comm()) + Vector::Vector (const Epetra_Map &input_map) { last_action = Zero; + Epetra_LocalMap map (input_map.NumGlobalElements(), + input_map.IndexBase(), + input_map.Comm()); vector = std::auto_ptr (new Epetra_FEVector(map)); } Vector::Vector (const VectorBase &v) - : - map (v.vector->Map().NumGlobalElements(), 0, v.vector->Comm()) { last_action = Zero; + Epetra_LocalMap map (v.vector->Map().NumGlobalElements(), + v.vector->Map().IndexBase(), + v.vector->Map().Comm()); vector = std::auto_ptr (new Epetra_FEVector(map)); if (vector->Map().SameAs(v.vector->Map()) == true) @@ -364,9 +359,8 @@ namespace TrilinosWrappers { vector.reset(); - map = Epetra_LocalMap ((int)n, 0, - Utilities::Trilinos::comm_self()); - + Epetra_LocalMap map ((int)n, 0, + Utilities::Trilinos::comm_self()); vector = std::auto_ptr (new Epetra_FEVector (map)); } else if (fast == false) @@ -388,12 +382,12 @@ namespace TrilinosWrappers Vector::reinit (const Epetra_Map &input_map, const bool fast) { - if (map.NumGlobalElements() != input_map.NumGlobalElements()) + if (vector->Map().NumGlobalElements() != input_map.NumGlobalElements()) { vector.reset(); - map = Epetra_LocalMap (input_map.NumGlobalElements(), - input_map.IndexBase(), - input_map.Comm()); + Epetra_LocalMap map (input_map.NumGlobalElements(), + input_map.IndexBase(), + input_map.Comm()); vector = std::auto_ptr (new Epetra_FEVector (map)); } else if (fast == false) @@ -427,10 +421,9 @@ namespace TrilinosWrappers if (local_range() != v.local_range()) { vector.reset(); - map = Epetra_LocalMap (v.vector->GlobalLength(), - v.vector->Map().IndexBase(), - v.vector->Comm()); - + Epetra_LocalMap map (v.vector->GlobalLength(), + v.vector->Map().IndexBase(), + v.vector->Comm()); vector = std::auto_ptr (new Epetra_FEVector(map)); } else @@ -484,9 +477,9 @@ namespace TrilinosWrappers if (size() != v.size()) { vector.reset(); - map = Epetra_LocalMap (v.vector->Map().NumGlobalElements(), - v.vector->Map().IndexBase(), - v.vector->Comm()); + Epetra_LocalMap map (v.vector->Map().NumGlobalElements(), + v.vector->Map().IndexBase(), + v.vector->Comm()); vector = std::auto_ptr (new Epetra_FEVector(map)); } @@ -501,9 +494,9 @@ namespace TrilinosWrappers { if (size() != v.size()) { - map = Epetra_LocalMap (v.vector->Map().NumGlobalElements(), - v.vector->Map().IndexBase(), - v.vector->Comm()); + Epetra_LocalMap map (v.vector->Map().NumGlobalElements(), + v.vector->Map().IndexBase(), + v.vector->Comm()); vector = std::auto_ptr (new Epetra_FEVector(map)); } diff --git 
a/deal.II/lac/source/trilinos_vector_base.cc b/deal.II/lac/source/trilinos_vector_base.cc
index d330d84c12..5fba85de93 100644
--- a/deal.II/lac/source/trilinos_vector_base.cc
+++ b/deal.II/lac/source/trilinos_vector_base.cc
@@ -14,11 +14,10 @@
 #include
-#include
-
 #ifdef DEAL_II_USE_TRILINOS
-#include
+# include
+# include
 DEAL_II_NAMESPACE_OPEN
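
For illustration, a minimal usage sketch of the new IndexSet-based interface for parallel Trilinos vectors introduced by this patch. Only make_trilinos_map(), the Vector(IndexSet, MPI_Comm) constructor and reinit(IndexSet, MPI_Comm, bool) are taken from the diff; the include paths, the helper function name and the assumption that the distributed vector lives in TrilinosWrappers::MPI are illustrative.

```cpp
// Sketch only (not part of the commit): create and re-partition a
// distributed Trilinos vector from an IndexSet. Assumes deal.II built with
// DEAL_II_USE_TRILINOS and MPI; header names are assumptions.
#include <base/index_set.h>
#include <lac/trilinos_vector.h>

void make_parallel_vector (const dealii::IndexSet &locally_owned,
                           const MPI_Comm         &mpi_communicator)
{
  // The new constructor builds the Epetra map internally via
  // IndexSet::make_trilinos_map(communicator, /*overlapping=*/true).
  dealii::TrilinosWrappers::MPI::Vector v (locally_owned, mpi_communicator);

  // Re-partitioning later goes through the new reinit overload; the last
  // argument ("fast") skips zeroing the entries when true.
  v.reinit (locally_owned, mpi_communicator, /*fast=*/false);

  // The Epetra_Map can also be built explicitly, e.g. for a non-overlapping
  // (one-to-one) partition of vector elements across the communicator.
  Epetra_Map map = locally_owned.make_trilinos_map (mpi_communicator,
                                                    /*overlapping=*/false);
}
```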
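
Similarly, a hedged sketch of the extended SparsityPattern::reinit() signature with the new exchange_data flag, which this patch implements only for CompressedSimpleSparsityPattern (the other overloads assert that the flag is false). The map construction, include paths and the assembly of the local pattern are hypothetical; the reinit and print calls mirror the signatures in the diff.

```cpp
// Sketch only: build a distributed Trilinos sparsity pattern from a
// CompressedSimpleSparsityPattern, letting rows written into on a
// non-owning process be shipped to their owner (exchange_data == true).
#include <iostream>

#include <lac/compressed_simple_sparsity_pattern.h>
#include <lac/trilinos_sparsity_pattern.h>

void build_pattern (const Epetra_Map                               &row_map,
                    const Epetra_Map                               &col_map,
                    const dealii::CompressedSimpleSparsityPattern  &csp)
{
  dealii::TrilinosWrappers::SparsityPattern sparsity;

  // exchange_data == true is only supported for
  // CompressedSimpleSparsityPattern in this patch.
  sparsity.reinit (row_map, col_map, csp, /*exchange_data=*/true);

  // print() gained a flag that switches to Trilinos' own extended
  // Epetra_CrsGraph output instead of the row/column listing.
  sparsity.print (std::cout, /*write_extended_trilinos_info=*/true);
}
```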