From: heltai Date: Sun, 5 Jan 2014 19:09:16 +0000 (+0000) Subject: Merged from trunk. X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5b87e1cca26513c05c369bb6a6ba3ec3862ba208;p=dealii-svn.git Merged from trunk. git-svn-id: https://svn.dealii.org/branches/branch_manifold_id@32170 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/bundled/CMakeLists.txt b/deal.II/bundled/CMakeLists.txt index c2ba25e553..b4ff8c8bf6 100644 --- a/deal.II/bundled/CMakeLists.txt +++ b/deal.II/bundled/CMakeLists.txt @@ -33,7 +33,12 @@ IF(FEATURE_BOOST_BUNDLED_CONFIGURED) ) ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/serialization/src) - ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/iostreams/src) + + IF(DEAL_II_WITH_ZLIB) + ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/iostreams/src) + ELSE() + MESSAGE(STATUS "BOOST::Iostreams will not be available because zlib or its header files could not be found") + ENDIF() IF(DEAL_II_WITH_THREADS AND NOT DEAL_II_WITH_CXX11) ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/thread/src) diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt index 07e336c8cc..ef8f84838c 100644 --- a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt @@ -16,28 +16,15 @@ SET(src_boost_iostreams file_descriptor.cpp - gzip.cpp + gzip.cpp mapped_file.cpp - ) - -IF(DEAL_II_WITH_ZLIB) - SET(src_boost_iostreams - ${src_boost_iostreams} - zlib.cpp - ) -ELSE() - MESSAGE(STATUS "BOOST::Iostreams will not support gzipped streams because libz or its header files could not be found") -ENDIF() + zlib.cpp + ) IF(DEALII_WITH_BZIP2) - SET(src_boost_iostreams - ${src_boost_iostreams} - bzip2.cpp - ) + LIST(APPEND src_boost_iostreams bzip2.cpp) ELSE() MESSAGE(STATUS "BOOST::Iostreams will not support bz2'ed streams because libbz2 or its header files could not be found") ENDIF() - DEAL_II_ADD_LIBRARY(obj_boost_iostreams OBJECT ${src_boost_iostreams}) - diff --git a/deal.II/cmake/modules/FindDEALII_LAPACK.cmake b/deal.II/cmake/modules/FindDEALII_LAPACK.cmake index 903c19235d..378851f71c 100644 --- a/deal.II/cmake/modules/FindDEALII_LAPACK.cmake +++ b/deal.II/cmake/modules/FindDEALII_LAPACK.cmake @@ -51,8 +51,11 @@ SET(CMAKE_PREFIX_PATH ${_cmake_prefix_path_backup}) MARK_AS_ADVANCED( atlas_LIBRARY + atlcblas_LIBRARY + atllapack_LIBRARY blas_LIBRARY eigen_blas_LIBRARY + f77blas_LIBRARY gslcblas_LIBRARY lapack_LIBRARY m_LIBRARY diff --git a/deal.II/cmake/setup_cached_variables.cmake b/deal.II/cmake/setup_cached_variables.cmake index 3cad88e7d2..c4fc91f1dd 100644 --- a/deal.II/cmake/setup_cached_variables.cmake +++ b/deal.II/cmake/setup_cached_variables.cmake @@ -49,6 +49,10 @@ # # DEAL_II_WITH_64BIT_INDICES # +# Miscellaneous options: +# DEAL_II_DOXYGEN_USE_MATHJAX +# +# # *) May also be set via environment variable (CXXFLAGS, LDFLAGS) # (a nonempty cached variable has precedence and will not be # overwritten by environment) @@ -331,6 +335,12 @@ OPTION(DEAL_II_WITH_64BIT_INDICES # # ######################################################################## +OPTION(DEAL_II_DOXYGEN_USE_MATHJAX + "If set to ON, doxygen documentation is generated using mathjax" + OFF + ) +MARK_AS_ADVANCED(DEAL_II_DOXYGEN_USE_MATHJAX) + # # We do not support installation into the binary directory any more ("too # much pain, not enough profit"): diff --git a/deal.II/doc/doxygen/CMakeLists.txt b/deal.II/doc/doxygen/CMakeLists.txt index cdeeeebb78..e813105703 100644 --- 
a/deal.II/doc/doxygen/CMakeLists.txt +++ b/deal.II/doc/doxygen/CMakeLists.txt @@ -50,6 +50,11 @@ ADD_SUBDIRECTORY(tutorial) # uses ${DEAL_II_STEPS} # # Prepare auxiliary files for doxygen: # +IF(DEAL_II_DOXYGEN_USE_MATHJAX) + SET(_use_mathjax YES) +ELSE() + SET(_use_mathjax NO) +ENDIF() CONFIGURE_FILE( ${CMAKE_CURRENT_SOURCE_DIR}/options.dox.in ${CMAKE_CURRENT_BINARY_DIR}/options.dox diff --git a/deal.II/doc/doxygen/headers/vector_valued.h b/deal.II/doc/doxygen/headers/vector_valued.h index 2edcd2d5a8..b2ad033baa 100644 --- a/deal.II/doc/doxygen/headers/vector_valued.h +++ b/deal.II/doc/doxygen/headers/vector_valued.h @@ -126,7 +126,6 @@ * v=(0,0,v3)T. writing the outcomes below * each other, we obtain the system * @f[ - * \arraycolsep1pt * \begin{matrix} * (\nabla u_1,\nabla v_1) &&& = (f_1, v_1) * \\ @@ -144,7 +143,6 @@ * exhibit this system structure. Let us close by writing the full * system of the elastic equation with symmetric gradient D: * @f[ - * \arraycolsep1pt * \begin{matrix} * (\nabla u_1,\nabla v_1) + (\partial_1 u_1,\partial_1 v_1) * & (\partial_1 u_2,\partial_2 v_1) diff --git a/deal.II/doc/doxygen/options.dox.in b/deal.II/doc/doxygen/options.dox.in index 851d8954c8..88ca8938d2 100644 --- a/deal.II/doc/doxygen/options.dox.in +++ b/deal.II/doc/doxygen/options.dox.in @@ -98,7 +98,7 @@ HTML_OUTPUT = deal.II HTML_FILE_EXTENSION = .html HTML_HEADER = header.html HTML_FOOTER = footer.html -USE_MATHJAX = NO +USE_MATHJAX = @_use_mathjax@ MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols HTML_COLORSTYLE_HUE = 220 diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h index e398c01f87..8b705acd64 100644 --- a/deal.II/doc/news/changes.h +++ b/deal.II/doc/news/changes.h @@ -1,4 +1,4 @@ -// --------------------------------------------------------------------- + // --------------------------------------------------------------------- // $Id$ // // Copyright (C) 2013 by the deal.II authors @@ -106,7 +106,9 @@ inconvenience this causes. written into as an optional argument. This allows for pre-allocating all possible entries right away, which makes writing into the matrix from several threads possible (otherwise, only one processor at a time can write - off-processor data). + off-processor data). Similarly, TrilinosWrappers::MPI::Vector objects can + be initialized with hints to ghost elements for a writable vector that can + be added into from multiple threads.
(Martin Kronbichler, 2013/12/23) diff --git a/deal.II/doc/publications/dealii.bib b/deal.II/doc/publications/dealii.bib index c6a93e4c2d..f4d43108b4 100644 --- a/deal.II/doc/publications/dealii.bib +++ b/deal.II/doc/publications/dealii.bib @@ -12,11 +12,19 @@ } +@article{dealII81, + title = {The {\tt deal.{I}{I}} Library, Version 8.1}, + author = {W. Bangerth and T. Heister and L. Heltai and G. Kanschat + and M. Kronbichler and M. Maier and B. Turcksin and T. D. Young}, + journal = {arXiv preprint \url{http://arxiv.org/abs/1312.2266v4}}, + year = {2013}, +} + @article{dealII80, title = {The {\tt deal.{I}{I}} Library, Version 8.0}, author = {W. Bangerth and T. Heister and L. Heltai and G. Kanschat and M. Kronbichler and M. Maier and B. Turcksin and T. D. Young}, - journal = {arXiv preprint \url{http://arxiv.org/abs/1312.2266}}, + journal = {arXiv preprint \url{http://arxiv.org/abs/1312.2266v3}}, year = {2013}, } diff --git a/deal.II/doc/publications/index.html b/deal.II/doc/publications/index.html index 1c495eb61c..cbcfc73874 100644 --- a/deal.II/doc/publications/index.html +++ b/deal.II/doc/publications/index.html @@ -77,6 +77,23 @@

    +
+ 1. Wolfgang Bangerth, Timo Heister, Luca Heltai, Guido Kanschat, Martin Kronbichler, Matthias Maier, and Toby D. Young
+    The deal.II Library, Version 8.1
+    arxiv:1312.2266v4, 2013.
    +
    +@article{dealII81,
    +  title = {The {\tt deal.{I}{I}} Library, Version 8.1},
    +  author = {W. Bangerth and T. Heister and L. Heltai and G. Kanschat
    +   and M. Kronbichler and M. Maier and B. Turcksin and T. D. Young},
    +  journal = {arXiv preprint \url{http://arxiv.org/abs/1312.2266v4}},
    +  year = {2013},
    +}
    +
    +
  2. W. Bangerth,
+
+    For your convenience, we provide a BibTeX file with entries
+    for the publications in this section.
+

Older Releases

  1. Wolfgang Bangerth, Timo Heister, Luca Heltai, Guido Kanschat, Martin Kronbichler, Matthias Maier, and Toby D. Young
    The deal.II Library, Version 8.0
-    arxiv:1312.2266, 2013.
+    arxiv:1312.2266v3, 2013.
     @article{dealII80,
       title = {The {\tt deal.{I}{I}} Library, Version 8.0},
       author = {W. Bangerth and T. Heister and L. Heltai and G. Kanschat
        and M. Kronbichler and M. Maier and B. Turcksin and T. D. Young},
    -  journal = {arXiv preprint \url{http://arxiv.org/abs/1312.2266}},
    +  journal = {arXiv preprint \url{http://arxiv.org/abs/1312.2266v3}},
       year = {2013},
     }
     
-
-    For your convenience, we provide a BibTeX file with entries
-    for the publications in this section.
-
- - diff --git a/deal.II/examples/step-12/step-12.cc b/deal.II/examples/step-12/step-12.cc index 26c26d3acf..1bae0ad4a5 100644 --- a/deal.II/examples/step-12/step-12.cc +++ b/deal.II/examples/step-12/step-12.cc @@ -306,7 +306,7 @@ namespace Step12 &AdvectionProblem::integrate_cell_term, &AdvectionProblem::integrate_boundary_term, &AdvectionProblem::integrate_face_term, - assembler, true); + assembler); } diff --git a/deal.II/include/deal.II/lac/block_sparsity_pattern.h b/deal.II/include/deal.II/lac/block_sparsity_pattern.h index e22741f97c..8082826f24 100644 --- a/deal.II/include/deal.II/lac/block_sparsity_pattern.h +++ b/deal.II/include/deal.II/lac/block_sparsity_pattern.h @@ -1060,6 +1060,22 @@ namespace TrilinosWrappers BlockSparsityPattern (const std::vector ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD); + /** + * Initialize the pattern with two arrays of index sets that specify rows + * and columns of the matrix, where the size() of the IndexSets specifies + * the size of the blocks and the values in each IndexSet denotes the rows + * that are going to be saved in each block. The additional index set + * writable_rows is used to set all rows that we allow to write + * locally. This constructor is used to create matrices that allow several + * threads to write simultaneously into the matrix (to different rows, of + * course), see the method TrilinosWrappers::SparsityPattern::reinit + * method with three index set arguments for more details. + */ + BlockSparsityPattern (const std::vector &row_parallel_partitioning, + const std::vector &column_parallel_partitioning, + const std::vector &writeable_rows, + const MPI_Comm &communicator = MPI_COMM_WORLD); + /** * Resize the matrix to a tensor product of matrices with dimensions * defined by the arguments. diff --git a/deal.II/include/deal.II/lac/relaxation_block.h b/deal.II/include/deal.II/lac/relaxation_block.h index 6e6bb9bb15..994a5c502c 100644 --- a/deal.II/include/deal.II/lac/relaxation_block.h +++ b/deal.II/include/deal.II/lac/relaxation_block.h @@ -86,19 +86,6 @@ public: const bool invert_diagonal = true, const bool same_diagonal = false); - /** - * @deprecated Since the BlockList is now a data member of this - * class, it is not recommended anymore to generate it - * independently. - * - * Constructor. - */ - AdditionalData (const BlockList &block_list, - const double relaxation = 1., - const bool invert_diagonal = true, - const bool same_diagonal = false) DEAL_II_DEPRECATED; - - /** * The mapping from indices to blocks. */ diff --git a/deal.II/include/deal.II/lac/relaxation_block.templates.h b/deal.II/include/deal.II/lac/relaxation_block.templates.h index 621ed76280..8028f74acf 100644 --- a/deal.II/include/deal.II/lac/relaxation_block.templates.h +++ b/deal.II/include/deal.II/lac/relaxation_block.templates.h @@ -51,24 +51,6 @@ RelaxationBlock::AdditionalData::memory_consumption() const } -template -inline -RelaxationBlock::AdditionalData::AdditionalData ( - const BlockList &bl, - const double relaxation, - const bool invert_diagonal, - const bool same_diagonal) - : - relaxation(relaxation), - invert_diagonal(invert_diagonal), - same_diagonal(same_diagonal), - inversion(PreconditionBlockBase::gauss_jordan), - threshold(0.) 
-{ - bl.create_sparsity_pattern(block_list, 0); -} - - template inline void diff --git a/deal.II/include/deal.II/lac/trilinos_block_vector.h b/deal.II/include/deal.II/lac/trilinos_block_vector.h index 9006b134c2..276f778902 100644 --- a/deal.II/include/deal.II/lac/trilinos_block_vector.h +++ b/deal.II/include/deal.II/lac/trilinos_block_vector.h @@ -71,20 +71,17 @@ namespace TrilinosWrappers { public: /** - * Typedef the base class for simpler - * access to its own typedefs. + * Typedef the base class for simpler access to its own typedefs. */ typedef BlockVectorBase BaseClass; /** - * Typedef the type of the underlying - * vector. + * Typedef the type of the underlying vector. */ typedef BaseClass::BlockType BlockType; /** - * Import the typedefs from the base - * class. + * Import the typedefs from the base class. */ typedef BaseClass::value_type value_type; typedef BaseClass::pointer pointer; @@ -96,90 +93,59 @@ namespace TrilinosWrappers typedef BaseClass::const_iterator const_iterator; /** - * Default constructor. Generate an - * empty vector without any blocks. + * Default constructor. Generate an empty vector without any blocks. */ BlockVector (); /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in Input_Maps. - * For this non-distributed vector, - * the %parallel partitioning is not - * used, just the global size of the - * partitioner. + * Constructor. Generate a block vector with as many blocks as there are + * entries in Input_Maps. For this non-distributed vector, the %parallel + * partitioning is not used, just the global size of the partitioner. */ explicit BlockVector (const std::vector &partitioner); /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in Input_Maps. - * For this non-distributed vector, - * the %parallel partitioning is not - * used, just the global size of the - * partitioner. + * Constructor. Generate a block vector with as many blocks as there are + * entries in Input_Maps. For this non-distributed vector, the %parallel + * partitioning is not used, just the global size of the partitioner. */ explicit BlockVector (const std::vector &partitioner, const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Copy-Constructor. Set all the - * properties of the non-%parallel - * vector to those of the given - * %parallel vector and import the - * elements. + * Copy-Constructor. Set all the properties of the non-%parallel vector to + * those of the given %parallel vector and import the elements. */ BlockVector (const MPI::BlockVector &V); /** - * Copy-Constructor. Set all the - * properties of the vector to those - * of the given input vector and copy - * the elements. + * Copy-Constructor. Set all the properties of the vector to those of the + * given input vector and copy the elements. */ BlockVector (const BlockVector &V); /** - * Creates a block vector - * consisting of - * num_blocks - * components, but there is no - * content in the individual - * components and the user has to - * fill appropriate data using a - * reinit of the blocks. + * Creates a block vector consisting of num_blocks components, + * but there is no content in the individual components and the user has + * to fill appropriate data using a reinit of the blocks. */ explicit BlockVector (const size_type num_blocks); /** - * Constructor. Set the number of - * blocks to n.size() and - * initialize each block with - * n[i] zero elements. + * Constructor. 
Set the number of blocks to n.size() and + * initialize each block with n[i] zero elements. * * References BlockVector.reinit(). */ explicit BlockVector (const std::vector &N); /** - * Constructor. Set the number of - * blocks to - * n.size(). Initialize the - * vector with the elements - * pointed to by the range of - * iterators given as second and - * third argument. Apart from the - * first argument, this - * constructor is in complete - * analogy to the respective - * constructor of the - * std::vector class, but the - * first argument is needed in - * order to know how to subdivide - * the block vector into - * different blocks. + * Constructor. Set the number of blocks to n.size(). Initialize + * the vector with the elements pointed to by the range of iterators given + * as second and third argument. Apart from the first argument, this + * constructor is in complete analogy to the respective constructor of the + * std::vector class, but the first argument is needed in order + * to know how to subdivide the block vector into different blocks. */ template BlockVector (const std::vector &n, @@ -196,8 +162,7 @@ namespace TrilinosWrappers * * @deprecated * - * See @ref GlossCompress "Compressing - * distributed objects" for more + * See @ref GlossCompress "Compressing distributed objects" for more * information. */ void compress (const Epetra_CombineMode last_action) DEAL_II_DEPRECATED; @@ -208,91 +173,60 @@ namespace TrilinosWrappers using BlockVectorBase::compress; /** - * Copy operator: fill all - * components of the vector that - * are locally stored with the - * given scalar value. + * Copy operator: fill all components of the vector that are locally + * stored with the given scalar value. */ BlockVector & operator = (const value_type s); /** - * Copy operator for a - * distributed Trilinos vector to - * a localized one. + * Copy operator for a distributed Trilinos vector to a localized one. */ BlockVector & operator = (const MPI::BlockVector &V); /** - * Copy operator for arguments of - * the same type. + * Copy operator for arguments of the same type. */ BlockVector & operator = (const BlockVector &V); /** - * Another copy function. This - * one takes a deal.II block - * vector and copies it into a - * TrilinosWrappers block - * vector. Note that the number - * of blocks has to be the same - * in the vector as in the input - * vector. Use the reinit() - * command for resizing the - * BlockVector or for changing - * the internal structure of the - * block components. + * Another copy function. This one takes a deal.II block vector and copies + * it into a TrilinosWrappers block vector. Note that the number of blocks + * has to be the same in the vector as in the input vector. Use the + * reinit() command for resizing the BlockVector or for changing the + * internal structure of the block components. * - * Since Trilinos only works on - * doubles, this function is - * limited to accept only one - * possible number type in the - * deal.II vector. + * Since Trilinos only works on doubles, this function is limited to + * accept only one possible number type in the deal.II vector. */ template BlockVector & operator = (const ::dealii::BlockVector &V); /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are Epetra_Maps given in the - * input argument, according to the - * global size of the individual - * components described in the - * maps. Note that the resulting - * vector will be stored completely - * on each process. 
The Epetra_Map - * is useful when data exchange - * with a distributed vector based - * on the same Epetra_map is - * intended. In that case, the same - * communicator is used for data - * exchange. + * Reinitialize the BlockVector to contain as many blocks as there are + * Epetra_Maps given in the input argument, according to the global size + * of the individual components described in the maps. Note that the + * resulting vector will be stored completely on each process. The + * Epetra_Map is useful when data exchange with a distributed vector based + * on the same Epetra_map is intended. In that case, the same communicator + * is used for data exchange. * - * If fast==false, the vector - * is filled with zeros. + * If fast==false, the vector is filled with zeros. */ void reinit (const std::vector &partitioning, const bool fast = false); /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are index sets given in the - * input argument, according to the - * global size of the individual - * components described in the - * index set, and using a given MPI - * communicator. The MPI - * communicator is useful when data - * exchange with a distributed - * vector based on the same - * initialization is intended. In - * that case, the same communicator - * is used for data exchange. + * Reinitialize the BlockVector to contain as many blocks as there are + * index sets given in the input argument, according to the global size of + * the individual components described in the index set, and using a given + * MPI communicator. The MPI communicator is useful when data exchange + * with a distributed vector based on the same initialization is + * intended. In that case, the same communicator is used for data + * exchange. * * If fast==false, the vector * is filled with zeros. @@ -302,13 +236,9 @@ namespace TrilinosWrappers const bool fast = false); /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are elements in the first - * argument, and with the respective - * sizes. Since no distribution map - * is given, all vectors are local - * vectors. + * Reinitialize the BlockVector to contain as many blocks as there are + * elements in the first argument, and with the respective sizes. Since no + * distribution map is given, all vectors are local vectors. * * If fast==false, the vector * is filled with zeros. @@ -324,76 +254,44 @@ namespace TrilinosWrappers void reinit (const MPI::BlockVector &V); /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. + * Change the dimension to that of the vector V. The same applies + * as for the other reinit() function. * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). + * The elements of V are not copied, i.e. this function is the + * same as calling reinit (V.size(), fast). * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. 
+ * Note that you must call this (or the other reinit() functions) + * function, rather than calling the reinit() functions of an individual + * block, to allow the block vector to update its caches of vector + * sizes. If you call reinit() on one of the blocks, then subsequent + * actions on this object may yield unpredictable results since they may + * be routed to the wrong block. */ void reinit (const BlockVector &V, const bool fast = false); /** - * Change the number of blocks to - * num_blocks. The individual - * blocks will get initialized with - * zero size, so it is assumed that - * the user resizes the - * individual blocks by herself - * in an appropriate way, and - * calls collect_sizes - * afterwards. + * Change the number of blocks to num_blocks. The individual + * blocks will get initialized with zero size, so it is assumed that the + * user resizes the individual blocks by herself in an appropriate way, + * and calls collect_sizes afterwards. */ void reinit (const size_type num_blocks); /** - * Swap the contents of this - * vector and the other vector - * v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. + * Swap the contents of this vector and the other vector v. One + * could do this operation with a temporary variable and copying over the + * data elements, but this function is significantly more efficient since + * it only swaps the pointers to the data of the two vectors and therefore + * does not need to allocate temporary storage and move data around. * - * Limitation: right now this - * function only works if both - * vectors have the same number - * of blocks. If needed, the - * numbers of blocks should be + * Limitation: right now this function only works if both vectors have the + * same number of blocks. If needed, the numbers of blocks should be * exchanged, too. * - * This function is analog to the - * the swap() function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. + * This function is analog to the the swap() function of all C++ standard + * containers. Also, there is a global function swap(u,v) that simply + * calls u.swap(v), again in analogy to standard functions. */ void swap (BlockVector &v); diff --git a/deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h b/deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h index 7ec7a0e217..4e990c1469 100644 --- a/deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h +++ b/deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h @@ -73,20 +73,17 @@ namespace TrilinosWrappers { public: /** - * Typedef the base class for simpler - * access to its own typedefs. + * Typedef the base class for simpler access to its own typedefs. */ typedef BlockVectorBase BaseClass; /** - * Typedef the type of the underlying - * vector. + * Typedef the type of the underlying vector. */ typedef BaseClass::BlockType BlockType; /** - * Import the typedefs from the base - * class. + * Import the typedefs from the base class. 
*/ typedef BaseClass::value_type value_type; typedef BaseClass::pointer pointer; @@ -98,62 +95,46 @@ namespace TrilinosWrappers typedef BaseClass::const_iterator const_iterator; /** - * Default constructor. Generate an - * empty vector without any blocks. + * Default constructor. Generate an empty vector without any blocks. */ BlockVector (); /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in @p - * partitioning. Each Epetra_Map - * contains the layout of the - * distribution of data among the MPI - * processes. + * Constructor. Generate a block vector with as many blocks as there are + * entries in @p partitioning. Each Epetra_Map contains the layout of + * the distribution of data among the MPI processes. */ explicit BlockVector (const std::vector ¶llel_partitioning); /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in - * @p partitioning. Each IndexSet - * together with the MPI communicator - * contains the layout of the - * distribution of data among the MPI - * processes. + * Constructor. Generate a block vector with as many blocks as there are + * entries in @p partitioning. Each IndexSet together with the MPI + * communicator contains the layout of the distribution of data among + * the MPI processes. */ explicit BlockVector (const std::vector ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Creates a BlockVector with ghost elements. @p ghost_values - * may contain any elements in @p parallel_partitioning, they will - * be ignored. + * Creates a BlockVector with ghost elements. See the respective + * reinit() method for more details. @p ghost_values may contain any + * elements in @p parallel_partitioning, they will be ignored. */ BlockVector (const std::vector ¶llel_partitioning, const std::vector &ghost_values, - const MPI_Comm &communicator); - + const MPI_Comm &communicator, + const bool vector_writable = false); /** - * Copy-Constructor. Set all the - * properties of the parallel vector - * to those of the given argument and - * copy the elements. + * Copy-Constructor. Set all the properties of the parallel vector to + * those of the given argument and copy the elements. */ BlockVector (const BlockVector &V); /** - * Creates a block vector - * consisting of - * num_blocks - * components, but there is no - * content in the individual - * components and the user has to - * fill appropriate data using a - * reinit of the blocks. + * Creates a block vector consisting of num_blocks components, + * but there is no content in the individual components and the user has + * to fill appropriate data using a reinit of the blocks. */ explicit BlockVector (const size_type num_blocks); @@ -163,168 +144,124 @@ namespace TrilinosWrappers ~BlockVector (); /** - * Copy operator: fill all - * components of the vector that - * are locally stored with the - * given scalar value. + * Copy operator: fill all components of the vector that are locally + * stored with the given scalar value. */ BlockVector & operator = (const value_type s); /** - * Copy operator for arguments of - * the same type. + * Copy operator for arguments of the same type. */ BlockVector & operator = (const BlockVector &V); /** - * Copy operator for arguments of - * the localized Trilinos vector - * type. + * Copy operator for arguments of the localized Trilinos vector type. */ BlockVector & operator = (const ::dealii::TrilinosWrappers::BlockVector &V); /** - * Another copy function. 
This - * one takes a deal.II block - * vector and copies it into a - * TrilinosWrappers block - * vector. Note that the number - * of blocks has to be the same - * in the vector as in the input - * vector. Use the reinit() - * command for resizing the - * BlockVector or for changing - * the internal structure of the - * block components. + * Another copy function. This one takes a deal.II block vector and + * copies it into a TrilinosWrappers block vector. Note that the number + * of blocks has to be the same in the vector as in the input + * vector. Use the reinit() command for resizing the BlockVector or for + * changing the internal structure of the block components. * - * Since Trilinos only works on - * doubles, this function is - * limited to accept only one - * possible number type in the - * deal.II vector. + * Since Trilinos only works on doubles, this function is limited to + * accept only one possible number type in the deal.II vector. */ template BlockVector & operator = (const ::dealii::BlockVector &V); /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are Epetra_Maps given in the input - * argument, according to the - * parallel distribution of the - * individual components described - * in the maps. + * Reinitialize the BlockVector to contain as many blocks as there are + * Epetra_Maps given in the input argument, according to the parallel + * distribution of the individual components described in the maps. * - * If fast==false, the vector - * is filled with zeros. + * If fast==false, the vector is filled with zeros. */ void reinit (const std::vector ¶llel_partitioning, const bool fast = false); /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are index sets given in the input - * argument, according to the - * parallel distribution of the - * individual components described - * in the maps. + * Reinitialize the BlockVector to contain as many blocks as there are + * index sets given in the input argument, according to the parallel + * distribution of the individual components described in the maps. * - * If fast==false, the vector - * is filled with zeros. + * If fast==false, the vector is filled with zeros. */ void reinit (const std::vector ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD, const bool fast = false); + /** - * like above, but with a second set of indices for - * ghost entries. + * Reinit functionality. This function destroys the old vector content + * and generates a new one based on the input partitioning. In addition + * to just specifying one index set as in all the other methods above, + * this method allows to supply an additional set of ghost + * entries. There are two different versions of a vector that can be + * created. If the flag @p vector_writable is set to @p false, the + * vector only allows read access to the joint set of @p + * parallel_partitioning and @p ghost_entries. The effect of the reinit + * method is then equivalent to calling the other reinit method with an + * index set containing both the locally owned entries and the ghost + * entries. + * + * If the flag @p vector_writable is set to true, this creates an + * alternative storage scheme for ghost elements that allows multiple + * threads to write into the vector (for the other reinit methods, only + * one thread is allowed to write into the ghost entries at a time). 
*/ void reinit (const std::vector &partitioning, const std::vector &ghost_values, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool vector_writable = false); /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. + * Change the dimension to that of the vector V. The same + * applies as for the other reinit() function. * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). + * The elements of V are not copied, i.e. this function is the + * same as calling reinit (V.size(), fast). * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. + * Note that you must call this (or the other reinit() functions) + * function, rather than calling the reinit() functions of an individual + * block, to allow the block vector to update its caches of vector + * sizes. If you call reinit() on one of the blocks, then subsequent + * actions on this object may yield unpredictable results since they may + * be routed to the wrong block. */ void reinit (const BlockVector &V, const bool fast = false); /** - * Change the number of blocks to - * num_blocks. The individual - * blocks will get initialized with - * zero size, so it is assumed that - * the user resizes the - * individual blocks by herself - * in an appropriate way, and - * calls collect_sizes - * afterwards. + * Change the number of blocks to num_blocks. The individual + * blocks will get initialized with zero size, so it is assumed that the + * user resizes the individual blocks by herself in an appropriate way, + * and calls collect_sizes afterwards. */ void reinit (const size_type num_blocks); /** - * This reinit function is meant to - * be used for parallel - * calculations where some - * non-local data has to be - * used. The typical situation - * where one needs this function is - * the call of the - * FEValues::get_function_values - * function (or of some - * derivatives) in parallel. Since - * it is usually faster to retrieve - * the data in advance, this - * function can be called before - * the assembly forks out to the - * different processors. What this - * function does is the following: - * It takes the information in the - * columns of the given matrix and - * looks which data couples between - * the different processors. That - * data is then queried from the - * input vector. Note that you - * should not write to the - * resulting vector any more, since - * the some data can be stored - * several times on different - * processors, leading to - * unpredictable results. In - * particular, such a vector cannot - * be used for matrix-vector - * products as for example done - * during the solution of linear - * systems. + * This reinit function is meant to be used for parallel calculations + * where some non-local data has to be used. The typical situation where + * one needs this function is the call of the + * FEValues::get_function_values function (or of some derivatives) + * in parallel. 
Since it is usually faster to retrieve the data in + * advance, this function can be called before the assembly forks out to + * the different processors. What this function does is the following: + * It takes the information in the columns of the given matrix and looks + * which data couples between the different processors. That data is + * then queried from the input vector. Note that you should not write to + * the resulting vector any more, since the some data can be stored + * several times on different processors, leading to unpredictable + * results. In particular, such a vector cannot be used for + * matrix-vector products as for example done during the solution of + * linear systems. */ void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m, const BlockVector &v); @@ -335,8 +272,7 @@ namespace TrilinosWrappers * * @deprecated * - * See @ref GlossCompress "Compressing - * distributed objects" for more + * See @ref GlossCompress "Compressing distributed objects" for more * information. */ void compress (const Epetra_CombineMode last_action) DEAL_II_DEPRECATED; @@ -348,17 +284,10 @@ namespace TrilinosWrappers /** - * Returns the state of the - * vector, i.e., whether - * compress() needs to be - * called after an operation - * requiring data - * exchange. Does only return - * non-true values when used in - * debug mode, since - * it is quite expensive to - * keep track of all operations - * that lead to the need for + * Returns the state of the vector, i.e., whether compress() needs to be + * called after an operation requiring data exchange. Does only return + * non-true values when used in debug mode, since it is quite + * expensive to keep track of all operations that lead to the need for * compress(). */ bool is_compressed () const; @@ -369,34 +298,21 @@ namespace TrilinosWrappers bool has_ghost_elements() const; /** - * Swap the contents of this - * vector and the other vector - * v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. + * Swap the contents of this vector and the other vector v. One + * could do this operation with a temporary variable and copying over + * the data elements, but this function is significantly more efficient + * since it only swaps the pointers to the data of the two vectors and + * therefore does not need to allocate temporary storage and move data + * around. * - * Limitation: right now this - * function only works if both - * vectors have the same number - * of blocks. If needed, the - * numbers of blocks should be + * Limitation: right now this function only works if both vectors have + * the same number of blocks. If needed, the numbers of blocks should be * exchanged, too. * - * This function is analog to the - * the swap() function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. + * This function is analog to the the swap() function of all C++ + * standard containers. Also, there is a global function swap(u,v) that + * simply calls u.swap(v), again in analogy to standard + * functions. 
*/ void swap (BlockVector &v); @@ -446,15 +362,19 @@ namespace TrilinosWrappers } + inline BlockVector::BlockVector (const std::vector ¶llel_partitioning, const std::vector &ghost_values, - const MPI_Comm &communicator) + const MPI_Comm &communicator, + const bool vector_writable) { - reinit(parallel_partitioning, ghost_values, communicator); + reinit(parallel_partitioning, ghost_values, communicator, + vector_writable); } + inline BlockVector::BlockVector (const size_type num_blocks) { @@ -533,6 +453,7 @@ namespace TrilinosWrappers } + inline bool BlockVector::has_ghost_elements() const @@ -545,6 +466,8 @@ namespace TrilinosWrappers return ghosted; } + + inline void BlockVector::swap (BlockVector &v) diff --git a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h index 8babcea003..2a21d5daa9 100644 --- a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h @@ -452,23 +452,23 @@ namespace TrilinosWrappers * * When writing into Trilinos matrices from several threads in shared * memory, several things must be kept in mind as there is no built-in locks - * in this class to prevent data races. Therefore, simultaneous access to - * the same matrix row at the same time leads to data races in general and - * must be explicitly avoided by the user. However, it is possible to access - * different rows of the matrix from several threads simultaneously - * under the following two conditions: + * in this class to prevent data races. Simultaneous access to the same + * matrix row at the same time can lead to data races and must be explicitly + * avoided by the user. However, it is possible to access different + * rows of the matrix from several threads simultaneously under the + * following two conditions: *
    *
  • The matrix uses only one MPI process. *
  • The matrix has been initialized from a * TrilinosWrappers::SparsityPattern object that in turn has been * initialized with the reinit function specifying three index sets, one * for the rows, one for the columns and for the larger set of @p - * writeable_rows. Note that all other reinit methods and constructors of - * TrilinosWrappers::SparsityPattern will result in a matrix that needs to - * allocate off-processor entries on demand, which breaks - * thread-safety. Of course, using the respective reinit method for the - * block Trilinos sparsity pattern and block matrix also results in - * thread-safety. + * writeable_rows, and the operation is an addition. Note that all other + * reinit methods and constructors of TrilinosWrappers::SparsityPattern + * will result in a matrix that needs to allocate off-processor entries on + * demand, which breaks thread-safety. Of course, using the respective + * reinit method for the block Trilinos sparsity pattern and block matrix + * also results in thread-safety. *
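
A minimal sketch of the pattern these two conditions describe, assuming a single MPI process; the index set names and the assemble() wrapper are placeholders, while the three-index-set reinit and the add operation are the interfaces referenced above:

  #include <deal.II/base/index_set.h>
  #include <deal.II/lac/trilinos_sparsity_pattern.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  // Build a pattern whose writable-rows set is larger than the set of
  // locally owned rows, then add into the matrix from several threads.
  void assemble (const IndexSet &locally_owned_rows,
                 const IndexSet &writable_rows)
  {
    TrilinosWrappers::SparsityPattern pattern;
    pattern.reinit (locally_owned_rows,   // rows
                    locally_owned_rows,   // columns
                    writable_rows,        // larger set of writable rows
                    MPI_COMM_WORLD);
    // ... pattern.add (i, j) for all couplings ...
    pattern.compress ();

    TrilinosWrappers::SparseMatrix matrix;
    matrix.reinit (pattern);

    // Several threads may now call matrix.add (i, j, value)
    // concurrently, provided no two threads touch the same row at the
    // same time and only additions are performed.
  }
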
* * @ingroup TrilinosWrappers @@ -2499,25 +2499,17 @@ namespace TrilinosWrappers if (ierr > 0) ierr = 0; } - else if (nonlocal_matrix.get() != 0) - { - // this is the case when we have explicitly set the off-processor - // rows and want to create a separate matrix object for them (to - // retain thread-safety) - Assert (nonlocal_matrix->RowMap().LID(static_cast(row)) != -1, - ExcMessage("Attempted to write into off-processor matrix row " - "that has not be specified as being writable upon " - "initialization")); - ierr = nonlocal_matrix->ReplaceGlobalValues(row, n_columns, - col_value_ptr, - col_index_ptr); - } else ierr = matrix->ReplaceGlobalValues (1, (TrilinosWrappers::types::int_type *)&row, n_columns, col_index_ptr, &col_value_ptr, Epetra_FECrsMatrix::ROW_MAJOR); + // use the FECrsMatrix facilities for set even in the case when we + // have explicitly set the off-processor rows because that only works + // properly when adding elements, not when setting them (since we want + // to only touch elements that have been set explicitly, and there is + // no way on the receiving processor to identify them otherwise) } Assert (ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0])); @@ -2762,8 +2754,7 @@ namespace TrilinosWrappers - // inline "simple" functions that are - // called frequently and do only involve + // inline "simple" functions that are called frequently and do only involve // a call to some Trilinos function. inline SparseMatrix::size_type diff --git a/deal.II/include/deal.II/lac/trilinos_vector.h b/deal.II/include/deal.II/lac/trilinos_vector.h index 1c4762e18c..9e7896532b 100644 --- a/deal.II/include/deal.II/lac/trilinos_vector.h +++ b/deal.II/include/deal.II/lac/trilinos_vector.h @@ -186,6 +186,18 @@ namespace TrilinosWrappers * operations at the same time, for example by placing zero additions if * necessary. * + *

+ * Thread safety of Trilinos vectors

+ * + * When writing into Trilinos vectors from several threads in shared + * memory, several things must be kept in mind as there is no built-in + * locks in this class to prevent data races. Simultaneous access to the + * same vector entry at the same time results in data races and must be + * explicitly avoided by the user. However, it is possible to access + * different entries of the vector from several threads + * simultaneously when only one MPI process is present or the vector has + * been constructed with an additional index set for ghost entries in + * write mode. + * * @ingroup TrilinosWrappers * @ingroup Vectors * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 @@ -199,15 +211,14 @@ namespace TrilinosWrappers typedef dealii::types::global_dof_index size_type; /** - * A variable that indicates whether this vector - * supports distributed data storage. If true, then - * this vector also needs an appropriate compress() - * function that allows communicating recent set or - * add operations to individual elements to be communicated - * to other processors. + * A variable that indicates whether this vector supports distributed + * data storage. If true, then this vector also needs an appropriate + * compress() function that allows communicating recent set or add + * operations to individual elements to be communicated to other + * processors. * - * For the current class, the variable equals - * true, since it does support parallel data storage. + * For the current class, the variable equals true, since it does + * support parallel data storage. */ static const bool supports_distributed_data = true; @@ -216,20 +227,14 @@ namespace TrilinosWrappers */ //@{ /** - * Default constructor that - * generates an empty (zero size) - * vector. The function - * reinit() will have to - * give the vector the correct - * size and distribution among - * processes in case of an MPI - * run. + * Default constructor that generates an empty (zero size) vector. The + * function reinit() will have to give the vector the correct + * size and distribution among processes in case of an MPI run. */ Vector (); /** - * Copy constructor using the - * given vector. + * Copy constructor using the given vector. */ Vector (const Vector &V); @@ -239,156 +244,88 @@ namespace TrilinosWrappers ~Vector (); /** - * Reinit functionality. This - * function sets the calling vector - * to the dimension and the parallel - * distribution of the input vector, - * but does not copy the elements in - * v. If fast is - * not true, the elements in - * the vector are initialized with - * zero, otherwise the content will - * be left unchanged and the user has - * to set all elements. + * Reinit functionality. This function sets the calling vector to the + * dimension and the parallel distribution of the input vector, but does + * not copy the elements in v. If fast is not + * true, the elements in the vector are initialized with zero, + * otherwise the content will be left unchanged and the user has to set + * all elements. * - * This function has a third argument, - * allow_different_maps, - * that allows for an exchange of - * data between two equal-sized - * vectors (but being distributed - * differently among the - * processors). A trivial application - * of this function is to generate a - * replication of a whole vector on - * each machine, when the calling - * vector is built according to the - * localized vector class - * TrilinosWrappers::Vector, and - * v is a distributed - * vector. 
In this case, the variable - * fast needs to be set to - * false, since it does not - * make sense to exchange data - * between differently parallelized - * vectors without touching the - * elements. + * This function has a third argument, allow_different_maps, + * that allows for an exchange of data between two equal-sized vectors + * (but being distributed differently among the processors). A trivial + * application of this function is to generate a replication of a whole + * vector on each machine, when the calling vector is built according to + * the localized vector class TrilinosWrappers::Vector, and v + * is a distributed vector. In this case, the variable fast + * needs to be set to false, since it does not make sense to + * exchange data between differently parallelized vectors without + * touching the elements. */ void reinit (const VectorBase &v, const bool fast = false, const bool allow_different_maps = false); - void reinit (const BlockVector &v, - const bool import_data = false); - /** - * Reinit function. Creates a vector - * using the IndexSet local as our - * own unknowns, add optional ghost - * values ghost. + * Create vector by merging components from a block vector. */ - void reinit (const IndexSet &local, - const IndexSet &ghost, - const MPI_Comm &communicator = MPI_COMM_WORLD); + void reinit (const BlockVector &v, + const bool import_data = false); /** - * Set all components of the - * vector to the given number @p - * s. Simply pass this down to - * the base class, but we still - * need to declare this function - * to make the example given in - * the discussion about making - * the constructor explicit work. + * Set all components of the vector to the given number @p s. Simply + * pass this down to the base class, but we still need to declare this + * function to make the example given in the discussion about making the + * constructor explicit work. */ Vector &operator = (const TrilinosScalar s); /** - * Copy the given vector. Resize - * the present vector if - * necessary. In this case, also - * the Epetra_Map that designs - * the parallel partitioning is - * taken from the input vector. + * Copy the given vector. Resize the present vector if necessary. In + * this case, also the Epetra_Map that designs the parallel partitioning + * is taken from the input vector. */ Vector & operator = (const Vector &V); /** - * Copy operator from a given - * localized vector (present on - * all processes) in - * TrilinosWrappers format to the - * current distributed - * vector. This function assumes - * that the calling vector (left - * hand object) already is of the - * same size as the right hand - * side vector. Otherwise, an - * exception will be thrown. + * Copy operator from a given localized vector (present on all + * processes) in TrilinosWrappers format to the current distributed + * vector. This function assumes that the calling vector (left hand + * object) already is of the same size as the right hand side + * vector. Otherwise, an exception will be thrown. */ Vector & operator = (const ::dealii::TrilinosWrappers::Vector &V); /** - * Another copy function. This - * one takes a deal.II vector and - * copies it into a - * TrilinosWrapper vector. Note - * that since we do not provide - * any Epetra_map that tells - * about the partitioning of the - * vector among the MPI - * processes, the size of the - * TrilinosWrapper vector has to - * be the same as the size of the - * input vector. 
In order to - * change the map, use the - * reinit(const Epetra_Map - * &input_map) function. + * Another copy function. This one takes a deal.II vector and copies it + * into a TrilinosWrapper vector. Note that since we do not provide any + * Epetra_map that tells about the partitioning of the vector among the + * MPI processes, the size of the TrilinosWrapper vector has to be the + * same as the size of the input vector. In order to change the map, use + * the reinit(const Epetra_Map &input_map) function. */ template Vector & operator = (const ::dealii::Vector &v); /** - * This reinit function is - * meant to be used for - * parallel calculations where - * some non-local data has to - * be used. The typical - * situation where one needs - * this function is the call of - * the - * FEValues::get_function_values - * function (or of some - * derivatives) in - * parallel. Since it is - * usually faster to retrieve - * the data in advance, this - * function can be called - * before the assembly forks - * out to the different - * processors. What this - * function does is the - * following: It takes the - * information in the columns - * of the given matrix and - * looks which data couples - * between the different - * processors. That data is - * then queried from the input - * vector. Note that you should - * not write to the resulting - * vector any more, since the - * some data can be stored - * several times on different - * processors, leading to - * unpredictable results. In - * particular, such a vector - * cannot be used for - * matrix-vector products as - * for example done during the - * solution of linear systems. + * This reinit function is meant to be used for parallel calculations + * where some non-local data has to be used. The typical situation where + * one needs this function is the call of the + * FEValues::get_function_values function (or of some derivatives) + * in parallel. Since it is usually faster to retrieve the data in + * advance, this function can be called before the assembly forks out to + * the different processors. What this function does is the following: + * It takes the information in the columns of the given matrix and looks + * which data couples between the different processors. That data is + * then queried from the input vector. Note that you should not write to + * the resulting vector any more, since the some data can be stored + * several times on different processors, leading to unpredictable + * results. In particular, such a vector cannot be used for + * matrix-vector products as for example done during the solution of + * linear systems. */ void import_nonlocal_data_for_fe (const dealii::TrilinosWrappers::SparseMatrix &matrix, @@ -399,35 +336,24 @@ namespace TrilinosWrappers */ //@{ /** - * This constructor takes an - * Epetra_Map that already knows - * how to distribute the - * individual components among - * the MPI processors. Since it - * also includes information - * about the size of the vector, - * this is all we need to - * generate a parallel vector. + * This constructor takes an Epetra_Map that already knows how to + * distribute the individual components among the MPI processors. Since + * it also includes information about the size of the vector, this is + * all we need to generate a parallel vector. */ explicit Vector (const Epetra_Map ¶llel_partitioning); /** - * Copy constructor from the - * TrilinosWrappers vector - * class. 
Since a vector of this - * class does not necessarily - * need to be distributed among - * processes, the user needs to - * supply us with an Epetra_Map - * that sets the partitioning - * details. + * Copy constructor from the TrilinosWrappers vector class. Since a + * vector of this class does not necessarily need to be distributed + * among processes, the user needs to supply us with an Epetra_Map that + * sets the partitioning details. */ Vector (const Epetra_Map ¶llel_partitioning, const VectorBase &v); /** - * Reinitialize from a deal.II - * vector. The Epetra_Map specifies the + * Reinitialize from a deal.II vector. The Epetra_Map specifies the * %parallel partitioning. */ template @@ -435,20 +361,15 @@ namespace TrilinosWrappers const dealii::Vector &v); /** - * Reinit functionality. This - * function destroys the old - * vector content and generates a - * new one based on the input - * map. + * Reinit functionality. This function destroys the old vector content + * and generates a new one based on the input map. */ void reinit (const Epetra_Map ¶llel_partitioning, const bool fast = false); /** - * Copy-constructor from deal.II - * vectors. Sets the dimension to that - * of the given vector, and copies all - * elements. + * Copy-constructor from deal.II vectors. Sets the dimension to that of + * the given vector, and copies all elements. */ template Vector (const Epetra_Map ¶llel_partitioning, @@ -459,14 +380,10 @@ namespace TrilinosWrappers */ //@{ /** - * This constructor takes an IndexSet - * that defines how to distribute the - * individual components among the - * MPI processors. Since it also - * includes information about the - * size of the vector, this is all we - * need to generate a %parallel - * vector. + * This constructor takes an IndexSet that defines how to distribute the + * individual components among the MPI processors. Since it also + * includes information about the size of the vector, this is all we + * need to generate a %parallel vector. */ explicit Vector (const IndexSet ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD); @@ -479,25 +396,18 @@ namespace TrilinosWrappers const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Copy constructor from the - * TrilinosWrappers vector - * class. Since a vector of this - * class does not necessarily need to - * be distributed among processes, - * the user needs to supply us with - * an IndexSet and an MPI - * communicator that set the - * partitioning details. + * Copy constructor from the TrilinosWrappers vector class. Since a + * vector of this class does not necessarily need to be distributed + * among processes, the user needs to supply us with an IndexSet and an + * MPI communicator that set the partitioning details. */ Vector (const IndexSet ¶llel_partitioning, const VectorBase &v, const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Copy-constructor from deal.II - * vectors. Sets the dimension to - * that of the given vector, and - * copies all the elements. + * Copy-constructor from deal.II vectors. Sets the dimension to that of + * the given vector, and copies all the elements. */ template Vector (const IndexSet ¶llel_partitioning, @@ -505,17 +415,37 @@ namespace TrilinosWrappers const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Reinit functionality. This function - * destroys the old vector content and - * generates a new one based on the - * input partitioning. The flag - * fast determines whether the - * vector should be filled with zero - * (false) or left untouched (true). 
+ * Reinit functionality. This function destroys the old vector content + * and generates a new one based on the input partitioning. The flag + * fast determines whether the vector should be filled with + * zero (false) or left untouched (true). */ void reinit (const IndexSet &parallel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD, const bool fast = false); + + /** + * Reinit functionality. This function destroys the old vector content + * and generates a new one based on the input partitioning. In addition + * to just specifying one index set as in all the other methods above, + * this method allows one to supply an additional set of ghost + * entries. There are two different versions of a vector that can be + * created. If the flag @p vector_writable is set to @p false, the + * vector only allows read access to the joint set of @p + * parallel_partitioning and @p ghost_entries. The effect of the reinit + * method is then equivalent to calling the other reinit method with an + * index set containing both the locally owned entries and the ghost + * entries. + * + * If the flag @p vector_writable is set to true, this creates an + * alternative storage scheme for ghost elements that allows multiple + * threads to write into the vector (for the other reinit methods, only + * one thread is allowed to write into the ghost entries at a time). + */ + void reinit (const IndexSet &locally_owned_entries, + const IndexSet &ghost_entries, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool vector_writable = false); //@} }; @@ -639,124 +569,86 @@ namespace TrilinosWrappers typedef dealii::types::global_dof_index size_type; /** - * A variable that indicates whether this vector - * supports distributed data storage. If true, then - * this vector also needs an appropriate compress() - * function that allows communicating recent set or - * add operations to individual elements to be communicated - * to other processors. + * A variable that indicates whether this vector supports distributed data + * storage. If true, then this vector also needs an appropriate compress() + * function that allows recent set or add operations to individual + * elements to be communicated to other processors. * - * For the current class, the variable equals - * false, since it does not support parallel data storage. - * If you do need parallel data storage, use - * TrilinosWrappers::MPI::Vector. + * For the current class, the variable equals false, since it does not + * support parallel data storage. If you do need parallel data storage, + * use TrilinosWrappers::MPI::Vector. */ static const bool supports_distributed_data = false; /** - * Default constructor that - * generates an empty (zero size) - * vector. The function - * reinit() will have to - * give the vector the correct + * Default constructor that generates an empty (zero size) vector. The + * function reinit() will have to give the vector the correct * size. */ Vector (); /** - * This constructor takes as - * input the number of elements - * in the vector. + * This constructor takes as input the number of elements in the vector. */ explicit Vector (const size_type n); /** - * This constructor takes as - * input the number of elements - * in the vector. If the map is - * not localized, i.e., if there - * are some elements that are not - * present on all processes, only - * the global size of the map - * will be taken and a localized - * map will be generated - * internally.
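(Returning to the two-IndexSet reinit declared earlier in this hunk: a minimal usage sketch, in which the owned and ghost index sets are assumed to come from the application, could look like this.)

    // Sketch: create a ghosted vector that several threads may add into.
    // 'owned' and 'ghosts' are assumed IndexSet objects describing the
    // locally owned and ghost entries, respectively.
    TrilinosWrappers::MPI::Vector v;
    v.reinit(owned, ghosts, MPI_COMM_WORLD, /*vector_writable=*/true);
    // ... threads add into owned and ghost entries ...
    v.compress(VectorOperation::add); // communicate the ghost contributions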
+ * This constructor takes as input the number of elements in the + * vector. If the map is not localized, i.e., if there are some elements + * that are not present on all processes, only the global size of the map + * will be taken and a localized map will be generated internally. */ explicit Vector (const Epetra_Map &partitioning); /** - * This constructor takes as input - * the number of elements in the - * vector. If the index set is not - * localized, i.e., if there are some - * elements that are not present on - * all processes, only the global - * size of the index set will be - * taken and a localized version will - * be generated internally. + * This constructor takes as input the number of elements in the + * vector. If the index set is not localized, i.e., if there are some + * elements that are not present on all processes, only the global size of + * the index set will be taken and a localized version will be generated + * internally. */ explicit Vector (const IndexSet &partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * This constructor takes a - * (possibly parallel) Trilinos - * Vector and generates a - * localized version of the whole - * content on each processor. + * This constructor takes a (possibly parallel) Trilinos Vector and + * generates a localized version of the whole content on each processor. */ explicit Vector (const VectorBase &V); /** - * Copy-constructor from deal.II - * vectors. Sets the dimension to that - * of the given vector, and copies all - * elements. + * Copy-constructor from deal.II vectors. Sets the dimension to that of + * the given vector, and copies all elements. */ template explicit Vector (const dealii::Vector &v); /** - * Reinit function that resizes - * the vector to the size - * specified by n. + * Reinit function that resizes the vector to the size specified by + * n. */ void reinit (const size_type n, const bool fast = false); /** - * Initialization with an - * Epetra_Map. Similar to the call in - * the other class MPI::Vector, with - * the difference that now a copy on - * all processes is generated. This - * initialization function is - * appropriate when the data in the - * localized vector should be - * imported from a distributed vector - * that has been initialized with the - * same communicator. The variable - * fast determines whether - * the vector should be filled with - * zero or left untouched. + * Initialization with an Epetra_Map. Similar to the call in the other + * class MPI::Vector, with the difference that now a copy on all processes + * is generated. This initialization function is appropriate when the data + * in the localized vector should be imported from a distributed vector + * that has been initialized with the same communicator. The variable + * fast determines whether the vector should be filled with zero + * or left untouched. */ void reinit (const Epetra_Map &input_map, const bool fast = false); /** - * Initialization with an - * IndexSet. Similar to the call in the - * other class MPI::Vector, with the - * difference that now a copy on all - * processes is generated. This - * initialization function is - * appropriate in case the data in the - * localized vector should be imported - * from a distributed vector that has - * been initialized with the same - * communicator. The variable - * fast determines whether the - * vector should be filled with zero + * Initialization with an IndexSet. 
Similar to the call in the other class + * MPI::Vector, with the difference that now a copy on all processes is + * generated. This initialization function is appropriate in case the data + * in the localized vector should be imported from a distributed vector + * that has been initialized with the same communicator. The variable + * fast determines whether the vector should be filled with zero * (false) or left untouched (true). */ void reinit (const IndexSet &input_map, @@ -764,66 +656,52 @@ namespace TrilinosWrappers const bool fast = false); /** - * Reinit function. Takes the - * information of a Vector and copies - * everything to the calling vector, - * now also allowing different maps. + * Reinit function. Takes the information of a Vector and copies + * everything to the calling vector, now also allowing different maps. */ void reinit (const VectorBase &V, const bool fast = false, const bool allow_different_maps = false); /** - * Set all components of the - * vector to the given number @p - * s. Simply pass this down to - * the base class, but we still - * need to declare this function - * to make the example given in - * the discussion about making - * the constructor explicit work. + * Set all components of the vector to the given number @p s. Simply pass + * this down to the base class, but we still need to declare this function + * to make the example given in the discussion about making the + * constructor explicit work. */ Vector &operator = (const TrilinosScalar s); /** - * Sets the left hand argument to - * the (parallel) Trilinos - * Vector. Equivalent to the @p - * reinit function. + * Sets the left hand argument to the (parallel) Trilinos + * Vector. Equivalent to the @p reinit function. */ Vector & operator = (const MPI::Vector &V); /** - * Sets the left hand argument to - * the deal.II vector. + * Sets the left hand argument to the deal.II vector. */ template Vector & operator = (const ::dealii::Vector &V); /** - * Copy operator. Copies both the - * dimension and the content in - * the right hand argument. + * Copy operator. Copies both the dimension and the content in the right + * hand argument. */ Vector & operator = (const Vector &V); /** - * This function does nothing but is - * there for compatibility with the - * @p PETScWrappers::Vector class. + * This function does nothing but is there for compatibility with the @p + * PETScWrappers::Vector class. * - * For the PETSc vector wrapper class, - * this function updates the ghost - * values of the PETSc vector. This - * is necessary after any modification + * For the PETSc vector wrapper class, this function updates the ghost + * values of the PETSc vector. This is necessary after any modification * before reading ghost values. * - * However, for the implementation of - * this class, it is immaterial and thus - * an empty function. + * However, for the implementation of this class, it is immaterial and + * thus an empty function. 
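+ * + * (A usage sketch for this class, with assumed names: a distributed + * vector can be localized on each process via + * @code + * TrilinosWrappers::Vector localized; + * localized = distributed; // 'distributed' is an assumed MPI::Vector + * @endcode + * which, per the documentation above, is equivalent to calling the + * corresponding reinit function and copying the content.)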
*/ void update_ghost_values () const; }; diff --git a/deal.II/include/deal.II/lac/trilinos_vector_base.h b/deal.II/include/deal.II/lac/trilinos_vector_base.h index 66995f19cd..39b9b79991 100644 --- a/deal.II/include/deal.II/lac/trilinos_vector_base.h +++ b/deal.II/include/deal.II/lac/trilinos_vector_base.h @@ -75,29 +75,20 @@ namespace TrilinosWrappers typedef dealii::types::global_dof_index size_type; /** - * This class implements a - * wrapper for accessing the - * Trilinos vector in the same - * way as we access deal.II - * objects: it is initialized - * with a vector and an element - * within it, and has a - * conversion operator to - * extract the scalar value of - * this element. It also has a - * variety of assignment - * operator for writing to this - * one element. @ingroup - * TrilinosWrappers + * This class implements a wrapper for accessing the Trilinos vector in + * the same way as we access deal.II objects: it is initialized with a + * vector and an element within it, and has a conversion operator to + * extract the scalar value of this element. It also has a variety of + * assignment operators for writing to this one element. + * + * @ingroup TrilinosWrappers */ class VectorReference { private: /** - * Constructor. It is made - * private so as to only allow - * the actual vector class to - * create it. + * Constructor. It is made private so as to only allow the actual vector + * class to create it. */ VectorReference (VectorBase &vector, const size_type index); @@ -105,83 +96,58 @@ namespace TrilinosWrappers public: /** - * This looks like a copy - * operator, but does something - * different than usual. In - * particular, it does not copy - * the member variables of this - * reference. Rather, it - * handles the situation where - * we have two vectors @p v and - * @p w, and assign elements - * like in - * v(i)=w(i). Here, - * both left and right hand - * side of the assignment have - * data type VectorReference, - * but what we really mean is - * to assign the vector - * elements represented by the - * two references. This - * operator implements this - * operation. Note also that - * this allows us to make the - * assignment operator const. + * This looks like a copy operator, but does something different than + * usual. In particular, it does not copy the member variables of this + * reference. Rather, it handles the situation where we have two vectors + * @p v and @p w, and assign elements like in v(i)=w(i). Here, + * both left and right hand side of the assignment have data type + * VectorReference, but what we really mean is to assign the vector + * elements represented by the two references. This operator implements + * this operation. Note also that this allows us to make the assignment + * operator const. */ const VectorReference & operator = (const VectorReference &r) const; /** - * Same as above but for non-const - * reference objects. + * Same as above but for non-const reference objects. */ const VectorReference & operator = (const VectorReference &r); /** - * Set the referenced element of the - * vector to s. + * Set the referenced element of the vector to s.
*/ const VectorReference & operator = (const TrilinosScalar &s) const; /** - * Add s to the - * referenced element of the - * vector-> + * Add s to the referenced element of the vector. */ const VectorReference & operator += (const TrilinosScalar &s) const; /** - * Subtract s from the - * referenced element of the - * vector-> + * Subtract s from the referenced element of the vector. */ const VectorReference & operator -= (const TrilinosScalar &s) const; /** - * Multiply the referenced - * element of the vector by - * s. + * Multiply the referenced element of the vector by s. */ const VectorReference & operator *= (const TrilinosScalar &s) const; /** - * Divide the referenced - * element of the vector by - * s. + * Divide the referenced element of the vector by s. */ const VectorReference & operator /= (const TrilinosScalar &s) const; /** - * Convert the reference to an - * actual value, i.e. return - * the value of the referenced - * element of the vector. + * Convert the reference to an actual value, i.e. return the value of + * the referenced element of the vector. */ operator TrilinosScalar () const; @@ -209,21 +175,17 @@ namespace TrilinosWrappers private: /** - * Point to the vector we are - * referencing. + * Point to the vector we are referencing. */ VectorBase &vector; /** - * Index of the referenced element - * of the vector. + * Index of the referenced element of the vector. */ const size_type index; /** - * Make the vector class a - * friend, so that it can - * create objects of the + * Make the vector class a friend, so that it can create objects of the * present type. */ friend class ::dealii::TrilinosWrappers::VectorBase; @@ -270,11 +232,8 @@ namespace TrilinosWrappers { public: /** - * Declare some of the standard - * types used in all - * containers. These types - * parallel those in the - * C standard libraries + * Declare some of the standard types used in all containers. These types + * parallel those in the C++ standard libraries * vector<...> class. */ typedef TrilinosScalar value_type; @@ -291,22 +250,15 @@ namespace TrilinosWrappers //@{ /** - * Default constructor that - * generates an empty (zero size) - * vector. The function - * reinit() will have to - * give the vector the correct - * size and distribution among - * processes in case of an MPI - * run. + * Default constructor that generates an empty (zero size) vector. The + * function reinit() will have to give the vector the correct + * size and distribution among processes in case of an MPI run. */ VectorBase (); /** - * Copy constructor. Sets the - * dimension to that of the given - * vector, and copies all the - * elements. + * Copy constructor. Sets the dimension to that of the given vector, and + * copies all the elements. */ VectorBase (const VectorBase &v); @@ -316,47 +268,30 @@ namespace TrilinosWrappers virtual ~VectorBase (); /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. + * Release all memory and return to a state just like after having called + * the default constructor. */ void clear (); /** - * Reinit functionality, sets the - * dimension and possibly the - * parallel partitioning (Epetra_Map) - * of the calling vector to the - * settings of the input vector. + * Reinit functionality, sets the dimension and possibly the parallel + * partitioning (Epetra_Map) of the calling vector to the settings of the + * input vector.
*/ void reinit (const VectorBase &v, const bool fast = false); /** - * Compress the underlying - * representation of the Trilinos - * object, i.e. flush the buffers - * of the vector object if it has - * any. This function is - * necessary after writing into a - * vector element-by-element and - * before anything else can be - * done on it. + * Compress the underlying representation of the Trilinos object, + * i.e. flush the buffers of the vector object if it has any. This + * function is necessary after writing into a vector element-by-element + * and before anything else can be done on it. * - * The (defaulted) argument can - * be used to specify the - * compress mode - * (Add or - * Insert) in case - * the vector has not been - * written to since the last - * time this function was - * called. The argument is - * ignored if the vector has - * been added or written to - * since the last time - * compress() was called. + * The (defaulted) argument can be used to specify the compress mode + * (Add or Insert) in case the vector has not + * been written to since the last time this function was called. The + * argument is ignored if the vector has been added or written to since + * the last time compress() was called. * * See @ref GlossCompress "Compressing distributed objects" * for more information. @@ -375,136 +310,87 @@ namespace TrilinosWrappers void compress (const Epetra_CombineMode last_action) DEAL_II_DEPRECATED; /** - * Returns the state of the - * vector, i.e., whether - * compress() has already been - * called after an operation - * requiring data exchange. + * Returns the state of the vector, i.e., whether compress() has already + * been called after an operation requiring data exchange. */ bool is_compressed () const; /** - * Set all components of the - * vector to the given number @p - * s. Simply pass this down to - * the Trilinos Epetra object, - * but we still need to declare - * this function to make the - * example given in the - * discussion about making the - * constructor explicit work. + * Set all components of the vector to the given number @p s. Simply pass + * this down to the Trilinos Epetra object, but we still need to declare + * this function to make the example given in the discussion about making + * the constructor explicit work. * - * Since the semantics of - * assigning a scalar to a vector - * are not immediately clear, - * this operator should really - * only be used if you want to - * set the entire vector to - * zero. This allows the - * intuitive notation - * v=0. Assigning other - * values is deprecated and may - * be disallowed in the future. + * Since the semantics of assigning a scalar to a vector are not + * immediately clear, this operator should really only be used if you want + * to set the entire vector to zero. This allows the intuitive notation + * v=0. Assigning other values is deprecated and may be + * disallowed in the future. */ VectorBase & operator = (const TrilinosScalar s); /** - * Copy function. This function takes - * a VectorBase vector and copies all - * the elements. The target vector - * will have the same parallel - * distribution as the calling - * vector. + * Copy function. This function takes a VectorBase vector and copies all + * the elements. The target vector will have the same parallel + * distribution as the calling vector. */ VectorBase & operator = (const VectorBase &v); /** - * Another copy function. This - * one takes a deal.II vector and - * copies it into a - * TrilinosWrapper vector. 
Note - * that since we do not provide - * any Epetra_map that tells - * about the partitioning of the - * vector among the MPI - * processes, the size of the - * TrilinosWrapper vector has to - * be the same as the size of the - * input vector. In order to - * change the map, use the - * reinit(const Epetra_Map - * &input_map) function. + * Another copy function. This one takes a deal.II vector and copies it + * into a TrilinosWrapper vector. Note that since we do not provide any + * Epetra_Map that tells about the partitioning of the vector among the + * MPI processes, the size of the TrilinosWrapper vector has to be the + * same as the size of the input vector. In order to change the map, use + * the reinit(const Epetra_Map &input_map) function. */ template <typename Number> VectorBase & operator = (const ::dealii::Vector<Number> &v); /** - * Test for equality. This - * function assumes that the - * present vector and the one to - * compare with have the same - * size already, since comparing - * vectors of different sizes - * makes not much sense anyway. + * Test for equality. This function assumes that the present vector and + * the one to compare with have the same size already, since comparing + * vectors of different sizes does not make much sense anyway. */ bool operator == (const VectorBase &v) const; /** - * Test for inequality. This - * function assumes that the - * present vector and the one to - * compare with have the same - * size already, since comparing - * vectors of different sizes - * makes not much sense anyway. + * Test for inequality. This function assumes that the present vector and + * the one to compare with have the same size already, since comparing + * vectors of different sizes does not make much sense anyway. */ bool operator != (const VectorBase &v) const; /** - * Return the global dimension of - * the vector. + * Return the global dimension of the vector. */ size_type size () const; /** - * Return the local dimension of - * the vector, i.e. the number of - * elements stored on the present - * MPI process. For sequential - * vectors, this number is the - * same as size(), but for - * parallel vectors it may be - * smaller. + * Return the local dimension of the vector, i.e. the number of elements + * stored on the present MPI process. For sequential vectors, this number + * is the same as size(), but for parallel vectors it may be smaller. * - * To figure out which elements - * exactly are stored locally, - * use local_range(). + * To figure out which elements exactly are stored locally, use + * local_range(). * - * If the vector contains ghost - * elements, they are included in - * this number. + * If the vector contains ghost elements, they are included in this + * number. */ size_type local_size () const; /** - * Return a pair of indices - * indicating which elements of - * this vector are stored - * locally. The first number is - * the index of the first element - * stored, the second the index - * of the one past the last one - * that is stored locally.
If this is a sequential vector, then the result will be + * the pair (0,N), otherwise it will be a pair + * (i,i+n), where n=local_size() and + * i is the first element of the vector stored on this + * processor, corresponding to the half open interval $[i,i+n)$. * * @note The description above is true most of the time, but * not always. In particular, Trilinos vectors need not store @@ -522,9 +408,8 @@ namespace TrilinosWrappers std::pair<size_type, size_type> local_range () const; /** - * Return whether @p index is in - * the local range or not, see - * also local_range(). + * Return whether @p index is in the local range or not, see also + * local_range(). * * @note The same limitation for the applicability of this * function applies as listed in the documentation of local_range(). @@ -548,88 +433,65 @@ namespace TrilinosWrappers IndexSet locally_owned_elements () const; /** - * Return if the vector contains ghost - * elements. This answer is true if there - * are ghost elements on at least one - * process. + * Return if the vector contains ghost elements. This answer is true if + * there are ghost elements on at least one process. */ bool has_ghost_elements() const; /** - * Return the scalar (inner) - * product of two vectors. The - * vectors must have the same - * size. + * Return the scalar (inner) product of two vectors. The vectors must have + * the same size. */ TrilinosScalar operator * (const VectorBase &vec) const; /** - * Return square of the - * $l_2$-norm. + * Return square of the $l_2$-norm. */ real_type norm_sqr () const; /** - * Mean value of the elements of - * this vector. + * Mean value of the elements of this vector. */ TrilinosScalar mean_value () const; /** - * Compute the minimal value of - * the elements of this vector. + * Compute the minimal value of the elements of this vector. */ TrilinosScalar minimal_value () const; /** - * $l_1$-norm of the vector. The - * sum of the absolute values. + * $l_1$-norm of the vector. The sum of the absolute values. */ real_type l1_norm () const; /** - * $l_2$-norm of the vector. The - * square root of the sum of the - * squares of the elements. + * $l_2$-norm of the vector. The square root of the sum of the squares of + * the elements. */ real_type l2_norm () const; /** - * $l_p$-norm of the vector. The - * pth root of the sum of - * the pth powers of the - * absolute values of the - * elements. + * $l_p$-norm of the vector. The pth root of the sum of the + * pth powers of the absolute values of the elements. */ real_type lp_norm (const TrilinosScalar p) const; /** - * Maximum absolute value of the - * elements. + * Maximum absolute value of the elements. */ real_type linfty_norm () const; /** - * Return whether the vector - * contains only elements with - * value zero. This function is - * mainly for internal - * consistency checks and should - * seldom be used when not in - * debug mode since it uses quite - * some time. + * Return whether the vector contains only elements with value zero. This + * function is mainly for internal consistency checks and should seldom be + * used when not in debug mode since it takes quite some time. */ bool all_zero () const; /** - * Return @p true if the vector - * has no negative entries, - * i.e. all entries are zero or - * positive.
This function is used, for example, to check + * whether refinement indicators are really all positive (or zero). */ bool is_non_negative () const; //@} @@ -641,23 +503,20 @@ namespace TrilinosWrappers //@{ /** - * Provide access to a given - * element, both read and write. + * Provide access to a given element, both read and write. */ reference operator () (const size_type index); /** - * Provide read-only access to an - * element. This is equivalent to - * the el() command. + * Provide read-only access to an element. This is equivalent to the + * el() command. */ TrilinosScalar operator () (const size_type index) const; /** - * Provide access to a given - * element, both read and write. + * Provide access to a given element, both read and write. * * Exactly the same as operator(). */ reference operator [] (const size_type index); /** - * Provide read-only access to an - * element. This is equivalent to - * the el() command. + * Provide read-only access to an element. This is equivalent to the + * el() command. * * Exactly the same as operator(). */ TrilinosScalar operator [] (const size_type index) const; /** - * A collective get operation: instead - * of getting individual elements of a - * vector, this function allows to get - * a whole set of elements at once. The - * indices of the elements to be read - * are stated in the first argument, - * the corresponding values are returned in the - * second. + * A collective get operation: instead of getting individual elements of a + * vector, this function allows to get a whole set of elements at + * once. The indices of the elements to be read are stated in the first + * argument, the corresponding values are returned in the second. */ void extract_subvector_to (const std::vector<size_type> &indices, std::vector<TrilinosScalar> &values) const; /** - * Just as the above, but with pointers. - * Useful in minimizing copying of data around. + * Just as the above, but with pointers. Useful in minimizing copying of + * data around. */ template <typename ForwardIterator, typename OutputIterator> void extract_subvector_to (ForwardIterator indices_begin, @@ -697,22 +551,20 @@ namespace TrilinosWrappers OutputIterator values_begin) const; /** - * Return the value of the vector - * entry i. Note that this - * function does only work - * properly when we request a - * data stored on the local - * processor. The function will - * throw an exception in case the - * elements sits on another - * process. + * Return the value of the vector entry i. Note that this function + * only works properly when we request data stored on the local + * processor. The function will throw an exception in case the element + * sits on another process. */ TrilinosScalar el (const size_type index) const; /** - * Make the Vector class a bit like the vector<> class of - * the C++ standard library by returning iterators to the start and end - * of the locally owned elements of this vector. The ordering of local elements corresponds to the one given + * Make the Vector class a bit like the vector<> class of the C++ + * standard library by returning iterators to the start and end of the + * locally owned elements of this vector. The ordering of local elements + * corresponds to the one given by the global indices in case the vector + * is constructed from an IndexSet or other methods in deal.II (note that + * an Epetra_Map can contain elements in arbitrary orders, though). * * It holds that end() - begin() == local_size().
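 * * (A sketch of this iterator interface, assuming a vector v of * this class and the usual iterator typedef declared with the other * container typedefs: * @code * for (TrilinosWrappers::VectorBase::iterator p = v.begin(); * p != v.end(); ++p) * *p *= 2.; * @endcode * visits exactly local_size() locally owned elements.)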
*/ @@ -731,31 +583,23 @@ namespace TrilinosWrappers iterator end (); /** - * Return a constant iterator pointing to the element past the end of - * the array of the locally owned entries. + * Return a constant iterator pointing to the element past the end of the + * array of the locally owned entries. */ const_iterator end () const; /** - * A collective set operation: - * instead of setting individual - * elements of a vector, this - * function allows to set a whole - * set of elements at once. The - * indices of the elements to be - * set are stated in the first - * argument, the corresponding - * values in the second. + * A collective set operation: instead of setting individual elements of a + * vector, this function allows to set a whole set of elements at + * once. The indices of the elements to be set are stated in the first + * argument, the corresponding values in the second. */ void set (const std::vector<size_type> &indices, const std::vector<TrilinosScalar> &values); /** - * This is a second collective - * set operation. As a - * difference, this function - * takes a deal.II vector of - * values. + * This is a second collective set operation. As a difference, this + * function takes a deal.II vector of values. */ void set (const std::vector<size_type> &indices, const ::dealii::Vector<TrilinosScalar> &values); @@ -768,109 +612,80 @@ namespace TrilinosWrappers //@{ /** - * This collective set operation - * is of lower level and can - * handle anything else — - * the only thing you have to - * provide is an address where - * all the indices are stored and - * the number of elements to be - * set. + * This collective set operation is of lower level and can handle anything + * else — the only thing you have to provide is an address where all + * the indices are stored and the number of elements to be set. */ void set (const size_type n_elements, const size_type *indices, const TrilinosScalar *values); /** - * A collective add operation: - * This funnction adds a whole - * set of values stored in @p - * values to the vector - * components specified by @p - * indices. + * A collective add operation: This function adds a whole set of values + * stored in @p values to the vector components specified by @p indices. */ void add (const std::vector<size_type> &indices, const std::vector<TrilinosScalar> &values); /** - * This is a second collective - * add operation. As a - * difference, this function - * takes a deal.II vector of - * values. + * This is a second collective add operation. As a difference, this + * function takes a deal.II vector of values. */ void add (const std::vector<size_type> &indices, const ::dealii::Vector<TrilinosScalar> &values); /** - * Take an address where - * n_elements are stored - * contiguously and add them into - * the vector. Handles all cases - * which are not covered by the - * other two add() - * functions above. + * Take an address where n_elements are stored contiguously and + * add them into the vector. Handles all cases which are not covered by + * the other two add() functions above. */ void add (const size_type n_elements, const size_type *indices, const TrilinosScalar *values); /** - * Multiply the entire vector by - * a fixed factor. + * Multiply the entire vector by a fixed factor. */ VectorBase &operator *= (const TrilinosScalar factor); /** - * Divide the entire vector by a - * fixed factor. + * Divide the entire vector by a fixed factor. */ VectorBase &operator /= (const TrilinosScalar factor); /** - * Add the given vector to the - * present one. + * Add the given vector to the present one.
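+ * + * (A sketch of the collective write operations documented above, with + * the compress() call that must follow them; the vector v, the + * indices, and the values are all assumed: + * @code + * std::vector<TrilinosWrappers::VectorBase::size_type> idx(2); + * std::vector<TrilinosScalar> val(2); + * idx[0] = 3; val[0] = 1.; + * idx[1] = 11; val[1] = 2.; + * v.set(idx, val); + * v.compress(VectorOperation::insert); + * @endcode)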
*/ VectorBase &operator += (const VectorBase &V); /** - * Subtract the given vector from - * the present one. + * Subtract the given vector from the present one. */ VectorBase &operator -= (const VectorBase &V); /** - * Addition of @p s to all - * components. Note that @p s is - * a scalar and not a vector. + * Addition of @p s to all components. Note that @p s is a scalar and not + * a vector. */ void add (const TrilinosScalar s); /** - * Simple vector addition, equal - * to the operator - * +=. + * Simple vector addition, equal to the operator +=. * - * Though, if the second argument - * allow_different_maps - * is set, then it is possible to - * add data from a different map. + * Though, if the second argument allow_different_maps is set, + * then it is possible to add data from a different map. */ void add (const VectorBase &V, const bool allow_different_maps = false); /** - * Simple addition of a multiple - * of a vector, i.e. *this = - * a*V. + * Simple addition of a multiple of a vector, i.e. *this = a*V. */ void add (const TrilinosScalar a, const VectorBase &V); /** - * Multiple addition of scaled - * vectors, i.e. *this = a*V + - * b*W. + * Multiple addition of scaled vectors, i.e. *this = a*V + b*W. */ void add (const TrilinosScalar a, const VectorBase &V, @@ -878,17 +693,14 @@ namespace TrilinosWrappers const VectorBase &W); /** - * Scaling and simple vector - * addition, i.e. *this = - * s*(*this) + V. + * Scaling and simple vector addition, i.e. *this = s*(*this) + + * V. */ void sadd (const TrilinosScalar s, const VectorBase &V); /** - * Scaling and simple addition, - * i.e. *this = s*(*this) + - * a*V. + * Scaling and simple addition, i.e. *this = s*(*this) + a*V. */ void sadd (const TrilinosScalar s, const TrilinosScalar a, @@ -904,9 +716,8 @@ namespace TrilinosWrappers const VectorBase &W); /** - * Scaling and multiple addition. - * *this = s*(*this) + a*V + - * b*W + c*X. + * Scaling and multiple addition. *this = s*(*this) + a*V + b*W + + * c*X. */ void sadd (const TrilinosScalar s, const TrilinosScalar a, @@ -917,26 +728,20 @@ namespace TrilinosWrappers const VectorBase &X); /** - * Scale each element of this - * vector by the corresponding - * element in the argument. This - * function is mostly meant to - * simulate multiplication (and - * immediate re-assignment) by a - * diagonal scaling matrix. + * Scale each element of this vector by the corresponding element in the + * argument. This function is mostly meant to simulate multiplication (and + * immediate re-assignment) by a diagonal scaling matrix. */ void scale (const VectorBase &scaling_factors); /** - * Assignment *this = - * a*V. + * Assignment *this = a*V. */ void equ (const TrilinosScalar a, const VectorBase &V); /** - * Assignment *this = a*V + - * b*W. + * Assignment *this = a*V + b*W. */ void equ (const TrilinosScalar a, const VectorBase &V, @@ -944,21 +749,14 @@ namespace TrilinosWrappers const VectorBase &W); /** - * Compute the elementwise ratio - * of the two given vectors, that - * is let this[i] = - * a[i]/b[i]. This is useful - * for example if you want to - * compute the cellwise ratio of - * true to estimated error. + * Compute the elementwise ratio of the two given vectors, that is let + * this[i] = a[i]/b[i]. This is useful for example if you want to + * compute the cellwise ratio of true to estimated error. * - * This vector is appropriately - * scaled to hold the result. + * This vector is appropriately scaled to hold the result. 
* - * If any of the b[i] is - * zero, the result is - * undefined. No attempt is made - * to catch such situations. + * If any of the b[i] is zero, the result is undefined. No + * attempt is made to catch such situations. */ void ratio (const VectorBase &a, const VectorBase &b); @@ -971,46 +769,34 @@ namespace TrilinosWrappers //@{ /** - * Return a const reference to the - * underlying Trilinos - * Epetra_MultiVector class. + * Return a const reference to the underlying Trilinos Epetra_MultiVector + * class. */ const Epetra_MultiVector &trilinos_vector () const; /** - * Return a (modifyable) reference to - * the underlying Trilinos + * Return a (modifiable) reference to the underlying Trilinos * Epetra_FEVector class. */ Epetra_FEVector &trilinos_vector (); /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the parallel - * partitioning of the vector. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the parallel partitioning of the vector. */ const Epetra_Map &vector_partitioner () const; /** - * Output of vector in - * user-defined format in analogy - * to the dealii::Vector - * class. + * Output of vector in user-defined format in analogy to the + * dealii::Vector class. */ void print (const char *format = 0) const; /** - * Print to a stream. @p - * precision denotes the desired - * precision with which values - * shall be printed, @p - * scientific whether scientific - * notation shall be used. If @p - * across is @p true then the - * vector is printed in a line, - * while if @p false then the - * elements are printed on a + * Print to a stream. @p precision denotes the desired precision with + * which values shall be printed, @p scientific whether scientific + * notation shall be used. If @p across is @p true then the vector is + * printed in a line, while if @p false then the elements are printed on a * separate line each. */ void print (std::ostream &out, @@ -1019,42 +805,27 @@ namespace TrilinosWrappers const bool across = true) const; /** - * Swap the contents of this - * vector and the other vector @p - * v. One could do this operation - * with a temporary variable and - * copying over the data - * elements, but this function is - * significantly more efficient - * since it only swaps the - * pointers to the data of the - * two vectors and therefore does - * not need to allocate temporary - * storage and move data - * around. Note that the vectors - * need to be of the same size - * and base on the same map. + * Swap the contents of this vector and the other vector @p v. One could + * do this operation with a temporary variable and copying over the data + * elements, but this function is significantly more efficient since it + * only swaps the pointers to the data of the two vectors and therefore + * does not need to allocate temporary storage and move data around. Note + * that the vectors need to be of the same size and be based on the same + * map. * - * This function is analog to the - * the @p swap function of all C - * standard containers. Also, - * there is a global function - * swap(u,v) that simply - * calls u.swap(v), - * again in analogy to standard + * This function is analogous to the @p swap function of all C++ standard + * containers. Also, there is a global function swap(u,v) that + * simply calls u.swap(v), again in analogy to standard * functions. */ void swap (VectorBase &v); /** - * Estimate for the memory - * consumption in bytes. + * Estimate for the memory consumption in bytes.
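+ * + * (For the swap() facility documented above, the global function can be + * used as in this sketch, with u and w assumed to be + * vectors of equal size built on the same map: + * @code + * swap(u, w); // only internal pointers are exchanged + * @endcode)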
*/ std::size_t memory_consumption () const; /** - * Return a reference to the MPI - * communicator object in use with this + * Return a reference to the MPI communicator object in use with this * object. */ const MPI_Comm &get_mpi_communicator () const; @@ -1091,57 +862,45 @@ namespace TrilinosWrappers private: /** - * Trilinos doesn't allow to - * mix additions to matrix - * entries and overwriting them - * (to make synchronisation of - * parallel computations - * simpler). The way we do it - * is to, for each access - * operation, store whether it - * is an insertion or an - * addition. If the previous - * one was of different type, - * then we first have to flush - * the Trilinos buffers; - * otherwise, we can simply go - * on. Luckily, Trilinos has - * an object for this which - * does already all the - * parallel communications in - * such a case, so we simply - * use their model, which - * stores whether the last - * operation was an addition or - * an insertion. + * Trilinos doesn't allow mixing additions to matrix entries and + * overwriting them (to make synchronisation of parallel computations + * simpler). The way we do it is to, for each access operation, store + * whether it is an insertion or an addition. If the previous one was of + * different type, then we first have to flush the Trilinos buffers; + * otherwise, we can simply go on. Luckily, Trilinos has an object for + * this which already does all the parallel communication in such a case, + * so we simply use their model, which stores whether the last operation + * was an addition or an insertion. */ Epetra_CombineMode last_action; /** - * A boolean variable to hold - * information on whether the - * vector is compressed or not. + * A boolean variable to hold information on whether the vector is + * compressed or not. */ bool compressed; /** - * Whether this vector has ghost elements. This is true - * on all processors even if only one of them has any - * ghost elements. + * Whether this vector has ghost elements. This is true on all processors + * even if only one of them has any ghost elements. */ bool has_ghosts; /** - * An Epetra distibuted vector - * type. Requires an existing - * Epetra_Map for storing data. + * An Epetra distributed vector type. Requires an existing Epetra_Map for + * storing data. */ std_cxx1x::shared_ptr<Epetra_FEVector> vector; + /** + * A vector object in Trilinos to be used for collecting the non-local + * elements if the vector was constructed with an additional IndexSet + * describing ghost elements. + */ + std_cxx1x::shared_ptr<Epetra_MultiVector> nonlocal_vector; /** - * Make the reference class a - * friend. + * Make the reference class a friend. */ friend class internal::VectorReference; friend class Vector; @@ -1418,6 +1177,9 @@ namespace TrilinosWrappers if (fast == false || vector_partitioner().SameAs(v.vector_partitioner())==false) vector.reset (new Epetra_FEVector(*v.vector)); + + if (v.nonlocal_vector.get() != 0) + nonlocal_vector.reset(new Epetra_MultiVector(v.nonlocal_vector->Map(), 1)); } @@ -1440,58 +1202,6 @@ namespace TrilinosWrappers - inline - void - VectorBase::compress (::dealii::VectorOperation::values given_last_action) - { - //Select which mode to send to - //Trilinos. Note that we use last_action - //if available and ignore what the user - //tells us to detect wrongly mixed - //operations. Typically given_last_action - //is only used on machines that do not - //execute an operation (because they have - //no own cells for example).
- Epetra_CombineMode mode = last_action; - if (last_action == Zero) - { - if (given_last_action==::dealii::VectorOperation::add) - mode = Add; - else if (given_last_action==::dealii::VectorOperation::insert) - mode = Insert; - } - -#ifdef DEBUG -# ifdef DEAL_II_WITH_MPI - // check that every process has decided - // to use the same mode. This will - // otherwise result in undefined - // behaviour in the call to - // GlobalAssemble(). - double double_mode = mode; - Utilities::MPI::MinMaxAvg result - = Utilities::MPI::min_max_avg (double_mode, - dynamic_cast - (&vector_partitioner().Comm())->GetMpiComm()); - Assert(result.max-result.min<1e-5, - ExcMessage ("Not all processors agree whether the last operation on " - "this vector was an addition or a set operation. This will " - "prevent the compress() operation from succeeding.")); - -# endif -#endif - - // Now pass over the information about - // what we did last to the vector. - const int ierr = vector->GlobalAssemble(mode); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - last_action = Zero; - - compressed = true; - } - - - inline void VectorBase::compress () @@ -1515,6 +1225,9 @@ namespace TrilinosWrappers AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + if (nonlocal_vector.get() != 0) + nonlocal_vector->PutScalar(0.); + return *this; } @@ -1574,7 +1287,9 @@ namespace TrilinosWrappers { const size_type row = indices[i]; const TrilinosWrappers::types::int_type local_row = vector->Map().LID(static_cast(row)); - if (local_row == -1) + if (local_row != -1) + (*vector)[0][local_row] = values[i]; + else { const int ierr = vector->ReplaceGlobalValues (1, (const TrilinosWrappers::types::int_type *)(&row), @@ -1582,8 +1297,11 @@ namespace TrilinosWrappers AssertThrow (ierr == 0, ExcTrilinosError(ierr)); compressed = false; } - else - (*vector)[0][local_row] = values[i]; + // in set operation, do not use the pre-allocated vector for nonlocal + // entries even if it exists. 
This is to ensure that we really only + // set the elements touched by the set() method and not all contained + // in the nonlocal entries vector (there is no way to distinguish them + // on the receiving processor) } } @@ -1642,7 +1360,9 @@ namespace TrilinosWrappers { const size_type row = indices[i]; const TrilinosWrappers::types::int_type local_row = vector->Map().LID(static_cast(row)); - if (local_row == -1) + if (local_row != -1) + (*vector)[0][local_row] += values[i]; + else if (nonlocal_vector.get() == 0) { const int ierr = vector->SumIntoGlobalValues (1, (const TrilinosWrappers::types::int_type *)(&row), @@ -1651,7 +1371,17 @@ namespace TrilinosWrappers compressed = false; } else - (*vector)[0][local_row] += values[i]; + { + // use pre-allocated vector for non-local entries if it exists for + // addition operation + const TrilinosWrappers::types::int_type my_row = nonlocal_vector->Map().LID(static_cast(row)); + Assert(my_row != -1, + ExcMessage("Attempted to write into off-processor vector entry " + "that has not be specified as being writable upon " + "initialization")); + (*nonlocal_vector)[0][my_row] += values[i]; + compressed = false; + } } } diff --git a/deal.II/include/deal.II/matrix_free/matrix_free.h b/deal.II/include/deal.II/matrix_free/matrix_free.h index ffc29f351e..5df978520e 100644 --- a/deal.II/include/deal.II/matrix_free/matrix_free.h +++ b/deal.II/include/deal.II/matrix_free/matrix_free.h @@ -1980,17 +1980,19 @@ namespace internal namespace partition { - template + template class CellWork : public tbb::task { public: CellWork (const Worker &worker_in, const unsigned int partition_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in) + const internal::MatrixFreeFunctions::TaskInfo &task_info_in, + const bool is_blocked_in) : worker (worker_in), partition (partition_in), - task_info (task_info_in) + task_info (task_info_in), + is_blocked (is_blocked_in) {}; tbb::task *execute () { @@ -1998,7 +2000,7 @@ namespace internal (task_info.partition_color_blocks_data[partition], task_info.partition_color_blocks_data[partition+1]); worker(cell_range); - if (blocked==true) + if (is_blocked==true) dummy->spawn (*dummy); return NULL; } @@ -2009,92 +2011,82 @@ namespace internal const Worker &worker; const unsigned int partition; const internal::MatrixFreeFunctions::TaskInfo &task_info; + const bool is_blocked; }; - template + template class PartitionWork : public tbb::task { public: PartitionWork (const Worker &function_in, const unsigned int partition_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in) + const internal::MatrixFreeFunctions::TaskInfo &task_info_in, + const bool is_blocked_in = false) : function (function_in), partition (partition_in), - task_info (task_info_in) + task_info (task_info_in), + is_blocked (is_blocked_in) {}; tbb::task *execute () { - if (false) - { - std::pair cell_range - (task_info.partition_color_blocks_data - [task_info.partition_color_blocks_row_index[partition]], - task_info.partition_color_blocks_data - [task_info.partition_color_blocks_row_index[partition+1]]); - function(cell_range); - } - else + tbb::empty_task *root = new( tbb::task::allocate_root() ) + tbb::empty_task; + unsigned int evens = task_info.partition_evens[partition]; + unsigned int odds = task_info.partition_odds[partition]; + unsigned int n_blocked_workers = + task_info.partition_n_blocked_workers[partition]; + unsigned int n_workers = task_info.partition_n_workers[partition]; + std::vector*> worker(n_workers); + std::vector*> 
blocked_worker(n_blocked_workers); + + root->set_ref_count(evens+1); + for (unsigned int j=0; j*> worker(n_workers); - std::vector*> blocked_worker(n_blocked_workers); - - root->set_ref_count(evens+1); - for (unsigned int j=0; jallocate_child()) + CellWork(function, task_info. + partition_color_blocks_row_index[partition]+2*j, + task_info, false); + if (j>0) { - worker[j] = new(root->allocate_child()) - CellWork(function,task_info. - partition_color_blocks_row_index - [partition] + 2*j, task_info); - if (j>0) - { - worker[j]->set_ref_count(2); - blocked_worker[j-1]->dummy = new(worker[j]->allocate_child()) - tbb::empty_task; - worker[j-1]->spawn(*blocked_worker[j-1]); - } - else - worker[j]->set_ref_count(1); - if (jset_ref_count(2); + blocked_worker[j-1]->dummy = new(worker[j]->allocate_child()) + tbb::empty_task; + worker[j-1]->spawn(*blocked_worker[j-1]); + } + else + worker[j]->set_ref_count(1); + if (jallocate_child()) + CellWork(function, task_info. + partition_color_blocks_row_index + [partition] + 2*j+1, task_info, true); + } + else + { + if (odds==evens) { - blocked_worker[j] = new(worker[j]->allocate_child()) - CellWork(function,task_info. - partition_color_blocks_row_index - [partition] + 2*j+1, task_info); + worker[evens] = new(worker[j]->allocate_child()) + CellWork(function, task_info. + partition_color_blocks_row_index[partition]+2*j+1, + task_info, false); + worker[j]->spawn(*worker[evens]); } else { - if (odds==evens) - { - worker[evens] = new(worker[j]->allocate_child()) - CellWork(function, - task_info. - partition_color_blocks_row_index - [partition]+2*j+1,task_info); - worker[j]->spawn(*worker[evens]); - } - else - { - tbb::empty_task *child = new(worker[j]->allocate_child()) - tbb::empty_task(); - worker[j]->spawn(*child); - } + tbb::empty_task *child = new(worker[j]->allocate_child()) + tbb::empty_task(); + worker[j]->spawn(*child); } } - - root->wait_for_all(); - root->destroy(*root); } - if (blocked==true) + + root->wait_for_all(); + root->destroy(*root); + if (is_blocked==true) dummy->spawn (*dummy); return NULL; } @@ -2105,6 +2097,7 @@ namespace internal const Worker &function; const unsigned int partition; const internal::MatrixFreeFunctions::TaskInfo &task_info; + const bool is_blocked; }; } // end of namespace partition @@ -2150,17 +2143,19 @@ namespace internal }; - template + template class PartitionWork : public tbb::task { public: PartitionWork (const Worker &worker_in, const unsigned int partition_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in) + const internal::MatrixFreeFunctions::TaskInfo &task_info_in, + const bool is_blocked_in) : worker (worker_in), partition (partition_in), - task_info (task_info_in) + task_info (task_info_in), + is_blocked (is_blocked_in) {}; tbb::task *execute () { @@ -2168,7 +2163,7 @@ namespace internal upper = task_info.partition_color_blocks_data[partition+1]; parallel_for(tbb::blocked_range(lower,upper,1), CellWork (worker,task_info)); - if (blocked==true) + if (is_blocked==true) dummy->spawn (*dummy); return NULL; } @@ -2179,6 +2174,7 @@ namespace internal const Worker &worker; const unsigned int partition; const internal::MatrixFreeFunctions::TaskInfo &task_info; + const bool is_blocked; }; } // end of namespace color @@ -2273,9 +2269,9 @@ MatrixFree::cell_loop root->set_ref_count(evens+1); unsigned int n_blocked_workers = task_info.n_blocked_workers; unsigned int n_workers = task_info.n_workers; - std::vector*> + std::vector*> worker(n_workers); - std::vector*> + std::vector*> 
blocked_worker(n_blocked_workers); internal::MPIComCompress *worker_compr = new(root->allocate_child()) @@ -2286,8 +2282,8 @@ MatrixFree::cell_loop if (j>0) { worker[j] = new(root->allocate_child()) - internal::partition::PartitionWork - (func,2*j,task_info); + internal::partition::PartitionWork + (func,2*j,task_info,false); worker[j]->set_ref_count(2); blocked_worker[j-1]->dummy = new(worker[j]->allocate_child()) tbb::empty_task; @@ -2299,8 +2295,8 @@ MatrixFree::cell_loop else { worker[j] = new(worker_compr->allocate_child()) - internal::partition::PartitionWork - (func,2*j,task_info); + internal::partition::PartitionWork + (func,2*j,task_info,false); worker[j]->set_ref_count(2); internal::MPIComDistribute *worker_dist = new (worker[j]->allocate_child()) @@ -2310,16 +2306,16 @@ MatrixFree::cell_loop if (jallocate_child()) - internal::partition::PartitionWork - (func,2*j+1,task_info); + internal::partition::PartitionWork + (func,2*j+1,task_info,true); } else { if (odds==evens) { worker[evens] = new(worker[j]->allocate_child()) - internal::partition::PartitionWork - (func,2*j+1,task_info); + internal::partition::PartitionWork + (func,2*j+1,task_info,false); worker[j]->spawn(*worker[evens]); } else @@ -2348,8 +2344,8 @@ MatrixFree::cell_loop unsigned int n_blocked_workers = odds-(odds+evens+1)%2; unsigned int n_workers = task_info.partition_color_blocks_data.size()-1- n_blocked_workers; - std::vector*> worker(n_workers); - std::vector*> blocked_worker(n_blocked_workers); + std::vector*> worker(n_workers); + std::vector*> blocked_worker(n_blocked_workers); unsigned int worker_index = 0, slice_index = 0; unsigned int spawn_index = 0, spawn_index_new = 0; int spawn_index_child = -2; @@ -2362,10 +2358,10 @@ MatrixFree::cell_loop spawn_index_new = worker_index; if (part == 0) worker[worker_index] = new(worker_compr->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info); + internal::color::PartitionWork(func,slice_index,task_info,false); else worker[worker_index] = new(root->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info); + internal::color::PartitionWork(func,slice_index,task_info,false); slice_index++; for (; slice_index::cell_loop worker[worker_index]->set_ref_count(1); worker_index++; worker[worker_index] = new (worker[worker_index-1]->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info); + internal::color::PartitionWork(func,slice_index,task_info,false); } worker[worker_index]->set_ref_count(2); if (part>0) @@ -2402,14 +2398,14 @@ MatrixFree::cell_loop if (partallocate_child()) - internal::color::PartitionWork(func,slice_index,task_info); + internal::color::PartitionWork(func,slice_index,task_info,true); slice_index++; if (slice_index< task_info.partition_color_blocks_row_index[part+1]) { blocked_worker[part/2]->set_ref_count(1); worker[worker_index] = new(blocked_worker[part/2]->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info); + internal::color::PartitionWork(func,slice_index,task_info,false); slice_index++; } else @@ -2428,7 +2424,7 @@ MatrixFree::cell_loop worker_index++; } worker[worker_index] = new (worker[worker_index-1]->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info); + internal::color::PartitionWork(func,slice_index,task_info,false); } spawn_index_child = worker_index; worker_index++; diff --git a/deal.II/include/deal.II/meshworker/dof_info.h b/deal.II/include/deal.II/meshworker/dof_info.h index 402f7822ea..7d0f7f62f8 100644 --- 
a/deal.II/include/deal.II/meshworker/dof_info.h
+++ b/deal.II/include/deal.II/meshworker/dof_info.h
@@ -172,6 +172,10 @@ namespace MeshWorker
     /// The block structure of the system
     SmartPointer > block_info;

+    /**
+     * The structure refers to a cell with level data instead of
+     * active data.
+     */
     bool level_cell;
   private:
     /**
@@ -280,6 +284,12 @@ namespace MeshWorker
      * face is available.
      */
     bool exterior_face_available[GeometryInfo<dim>::faces_per_cell];
+
+    /**
+     * A flag to specify if the current object has been set to a valid
+     * cell.
+     */
+    bool cell_valid;
   };

//----------------------------------------------------------------------//

@@ -417,7 +427,7 @@ namespace MeshWorker
  inline
  DoFInfoBox::DoFInfoBox(const DOFINFO &seed)
    :
-   cell(seed)
+   cell(seed), cell_valid(true)
  {
    for (unsigned int i=0; i<GeometryInfo<dim>::faces_per_cell; ++i)
      {
@@ -433,7 +443,7 @@ namespace MeshWorker
  inline
  DoFInfoBox::DoFInfoBox(const DoFInfoBox &other)
    :
-   cell(other.cell)
+   cell(other.cell), cell_valid(other.cell_valid)
  {
    for (unsigned int i=0; i<GeometryInfo<dim>::faces_per_cell; ++i)
      {
@@ -449,6 +459,7 @@ namespace MeshWorker
  inline void
  DoFInfoBox::reset ()
  {
+   cell_valid = false;
    for (unsigned int i=0; i<GeometryInfo<dim>::faces_per_cell; ++i)
      {
        interior_face_available[i] = false;
@@ -462,6 +473,9 @@ namespace MeshWorker
  inline void
  DoFInfoBox::assemble (ASSEMBLER &assembler) const
  {
+   if (!cell_valid)
+     return;
+
    assembler.assemble(cell);
    for (unsigned int i=0; i<GeometryInfo<dim>::faces_per_cell; ++i)
      {
diff --git a/deal.II/include/deal.II/meshworker/integration_info.h b/deal.II/include/deal.II/meshworker/integration_info.h
index 1bd8c63b5c..4eb299711e 100644
--- a/deal.II/include/deal.II/meshworker/integration_info.h
+++ b/deal.II/include/deal.II/meshworker/integration_info.h
@@ -386,7 +386,7 @@ namespace MeshWorker
    * these grid entities should be performed.
    *
    * If the parameter force is true, then all quadrature
-   * sets are filled with new quadrature ruels. If it is false, then
+   * sets are filled with new quadrature rules. If it is false, then
    * only empty rules are changed.
    */
   void initialize_gauss_quadrature(unsigned int n_cell_points,
diff --git a/deal.II/include/deal.II/meshworker/loop.h b/deal.II/include/deal.II/meshworker/loop.h
index 6595f91d94..22110233a2 100644
--- a/deal.II/include/deal.II/meshworker/loop.h
+++ b/deal.II/include/deal.II/meshworker/loop.h
@@ -81,7 +81,8 @@ namespace MeshWorker
      */
     LoopControl()
       : own_cells(true), ghost_cells(false),
-       faces_to_ghost(LoopControl::one), own_faces(LoopControl::one)
+       faces_to_ghost(LoopControl::one), own_faces(LoopControl::one),
+       cells_first(true)
     {
     }

@@ -117,80 +118,19 @@ namespace MeshWorker
      * Loop over faces between two locally owned cells:
      * - never: do not assemble face terms
      * - one: assemble once (always coming from the finer side)
-     * - both: assemble each face twice ALSO IF FINER?!?!?
-     * Default is one_side.
+     * - both: assemble each face twice (not implemented for hanging nodes!)
+     * Default is one.
      */
     FaceOption own_faces;
-
-    /**
-     * Based on the flags in this class, decide if this face needs to be
-     * assembled.
-     */
-    template <class ITERATOR>
-    bool operator() (const ITERATOR& cell, unsigned int face, bool is_level) const;

     /**
-     * Based on the flags in this class, decide if this cell needs to be
-     * assembled.
+     * Flag to determine if cell integrals should be done before or after
+     * face integrals. Default is true.
      */
-    template <class ITERATOR>
-    bool operator() (const ITERATOR& cell, bool is_level) const;
+    bool cells_first;
   };
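// A usage sketch for the LoopControl flags defined above (mesh and worker
// objects are placeholders, not taken from the patch):
MeshWorker::LoopControl lctrl;
lctrl.own_cells      = true;                          // integrate locally owned cells
lctrl.ghost_cells    = false;                         // skip ghost cells
lctrl.own_faces      = MeshWorker::LoopControl::one;  // each interior face once
lctrl.faces_to_ghost = MeshWorker::LoopControl::one;  // each ghost interface once
lctrl.cells_first    = false;                         // face terms before cell terms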
-
-  template <class ITERATOR>
-  bool LoopControl::operator() (const ITERATOR& cell, unsigned int face, bool is_level) const
-  {
-    const ITERATOR neighbor = cell->neighbor(face);
-    const bool c_local = (is_level)
-                         ? (cell->is_locally_owned())
-                         : (cell->is_locally_owned_on_level());
-    const bool n_local = (is_level)
-                         ? (neighbor->is_locally_owned())
-                         : (neighbor->is_locally_owned_on_level());
-
-    if (!c_local && !n_local)
-      return false;
-
-    if (c_local && n_local)
-      {
-        if (own_faces==LoopControl::never)
-          return false;
-
-        //TODO:
-//        if (cell->neighbor_is_coarser(face_no))
-//          return false;
-
-        if (own_faces==LoopControl::one && neighbor < cell)
-          return false;
-
-        Assert(own_faces==LoopControl::both, ExcInternalError());
-        return true;
-      }
-    else
-      {
-        // interface between owned and ghost cell
-
-        // TODO
-
-
-      }
-  }
-
-  template <class ITERATOR>
-  bool LoopControl::operator() (const ITERATOR &cell, bool is_level) const
-  {
-    const bool c_local = (is_level)
-                         ? (cell->is_locally_owned())
-                         : (cell->is_locally_owned_on_level());
-    if (own_cells && c_local)
-      return true;
-    if (ghost_cells && cell->is_ghost())
-      return true;
-    return false;
-  }
-
-
   /**
@@ -220,10 +160,7 @@ namespace MeshWorker
    * integrals are to be dealt with first. Note that independent of the
    * value of this flag, cell and face integrals of a given cell are
    * all taken care of before moving to the next cell.
-   * @param unique_faces_only determines, that a face between two cells
-   * of the same level is processed only from the cell which is less
-   * than its neighbor. If this parameter is false these faces
-   * are processed from both cells.
+   * @param loop_control control structure to specify what actions should be performed.
    *
    * @ingroup MeshWorker
    * @author Guido Kanschat
@@ -239,7 +176,6 @@ namespace MeshWorker
                      const std_cxx1x::function &face_worker,
-                     const bool cells_first,
                      const LoopControl & loop_control)
  {
    const bool ignore_subdomain = (cell->get_triangulation().locally_owned_subdomain()
                                   == numbers::invalid_subdomain_id);

    const types::subdomain_id csid = (cell->is_level_cell())
                                     ?
cell->level_subdomain_id() : cell->subdomain_id(); + const bool own_cell = ignore_subdomain || (csid == cell->get_triangulation().locally_owned_subdomain()); + + dof_info.reset(); + if ((!ignore_subdomain) && (csid == numbers::artificial_subdomain_id)) return; - - dof_info.reset(); + dof_info.cell.reinit(cell); + dof_info.cell_valid = true; const bool integrate_cell = (cell_worker != 0); const bool integrate_boundary = (boundary_worker != 0); const bool integrate_interior_face = (face_worker != 0); - dof_info.reset(); - - dof_info.cell.reinit(cell); if (integrate_cell) info.cell.reinit(dof_info.cell); // Execute this, if cells // have to be dealt with // before faces - if (integrate_cell && cells_first) + if (integrate_cell && loop_control.cells_first && + ((loop_control.own_cells && own_cell) || (loop_control.ghost_cells && !own_cell))) cell_worker(dof_info.cell, info.cell); // Call the callback function in @@ -282,7 +220,8 @@ namespace MeshWorker typename ITERATOR::AccessorType::Container::face_iterator face = cell->face(face_no); if (cell->at_boundary(face_no)) { - if (integrate_boundary) + // only integrate boundary faces of own cells + if (integrate_boundary && own_cell) { dof_info.interior_face_available[face_no] = true; dof_info.interior[face_no].reinit(cell, face, face_no); @@ -295,6 +234,28 @@ namespace MeshWorker // Interior face TriaIterator neighbor = cell->neighbor(face_no); + types::subdomain_id neighbid = numbers::artificial_subdomain_id; + if (neighbor->is_level_cell()) + neighbid = neighbor->level_subdomain_id(); + //subdomain id is only valid for active cells + else if (neighbor->active()) + neighbid = neighbor->subdomain_id(); + + const bool own_neighbor = ignore_subdomain || + (neighbid == cell->get_triangulation().locally_owned_subdomain()); + + // skip all faces between two ghost cells + if (!own_cell && !own_neighbor) + continue; + + // skip if the user doesn't want faces between own cells + if (own_cell && own_neighbor && loop_control.own_faces==LoopControl::never) + continue; + + // skip face to ghost + if (own_cell != own_neighbor && loop_control.faces_to_ghost==LoopControl::never) + continue; + // Deal with // refinement edges // from the refined @@ -310,6 +271,12 @@ namespace MeshWorker Assert(!cell->has_children(), ExcInternalError()); Assert(!neighbor->has_children(), ExcInternalError()); + // skip if only one processor needs to assemble the face + // to a ghost cell and the fine cell is not ours. + if (!own_cell + && loop_control.faces_to_ghost == LoopControl::one) + continue; + const std::pair neighbor_face_no = cell->neighbor_of_coarser_neighbor(face_no); const typename ITERATOR::AccessorType::Container::face_iterator nface @@ -328,20 +295,38 @@ namespace MeshWorker } else { - // Neighbor is - // on same - // level, but - // only do this - // from one side. - if (loop_control.own_faces != LoopControl::both && (neighbor < cell)) continue; - - // If iterator - // is active - // and neighbor - // is refined, - // skip + // If iterator is active and neighbor is refined, skip // internal face. 
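// The ownership rules applied in this loop can be summarized by a small
// predicate; a condensed sketch (names follow the surrounding code, the
// helper itself is illustrative only, not part of the patch):
bool skip_this_face (const bool own_cell, const bool own_neighbor,
                     const MeshWorker::LoopControl &lc)
{
  if (!own_cell && !own_neighbor)
    return true;                      // face between two ghost cells
  if (own_cell && own_neighbor &&
      lc.own_faces == MeshWorker::LoopControl::never)
    return true;                      // faces between own cells disabled
  if (own_cell != own_neighbor &&
      lc.faces_to_ghost == MeshWorker::LoopControl::never)
    return true;                      // faces to ghost neighbors disabled
  return false;
}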
if (internal::is_active_iterator(cell) && neighbor->has_children()) + { + Assert(loop_control.own_faces != LoopControl::both, ExcMessage( + "Assembling from both sides for own_faces is not " + "supported with hanging nodes!")); + continue; + } + + // Now neighbor is on same level, double-check this: + Assert(cell->level()==neighbor->level(), ExcInternalError()); + + // only do faces on same level from one side (unless + // LoopControl says otherwise) + if (own_cell && own_neighbor + && loop_control.own_faces == LoopControl::one + && (neighbor < cell)) + continue; + + // independent of loop_control.faces_to_ghost, + // we only look at faces to ghost on the same level once + // (only where own_cell=true and own_neighbor=false) + if (!own_cell) + continue; + + // now only one processor assembles faces_to_ghost. This + // logic is based on the subdomain id and is handled inside + // operator<. + if (own_cell && !own_neighbor + && loop_control.faces_to_ghost == LoopControl::one + && (neighbor < cell)) continue; const unsigned int neighbor_face_no = cell->neighbor_face_no(face_no); @@ -368,7 +353,8 @@ namespace MeshWorker // Execute this, if faces // have to be handled first - if (integrate_cell && !cells_first) + if (integrate_cell && !loop_control.cells_first && + ((loop_control.own_cells && own_cell) || (loop_control.ghost_cells && !own_cell))) cell_worker(dof_info.cell, info.cell); } @@ -389,6 +375,48 @@ namespace MeshWorker * @ingroup MeshWorker * @author Guido Kanschat, 2009 */ + template + void loop(ITERATOR begin, + typename identity::type end, + DOFINFO &dinfo, + INFOBOX &info, + const std_cxx1x::function &cell_worker, + const std_cxx1x::function &boundary_worker, + const std_cxx1x::function &face_worker, + ASSEMBLER &assembler, + const LoopControl &lctrl = LoopControl()) + { + DoFInfoBox dof_info(dinfo); + + assembler.initialize_info(dof_info.cell, false); + for (unsigned int i=0; i::faces_per_cell; ++i) + { + assembler.initialize_info(dof_info.interior[i], true); + assembler.initialize_info(dof_info.exterior[i], true); + } + + // Loop over all cells + #ifdef DEAL_II_MESHWORKER_PARALLEL + WorkStream::run(begin, end, + std_cxx1x::bind(&cell_action, + std_cxx1x::_1, std_cxx1x::_3, std_cxx1x::_2, + cell_worker, boundary_worker, face_worker, lctrl), + std_cxx1x::bind(&internal::assemble, std_cxx1x::_1, &assembler), + info, dof_info); + #else + for (ITERATOR cell = begin; cell != end; ++cell) + { + cell_action(cell, dof_info, + info, cell_worker, + boundary_worker, face_worker, + lctrl); + dof_info.assemble(assembler); + } + #endif + } + template void loop(ITERATOR begin, typename identity::type end, @@ -400,40 +428,28 @@ namespace MeshWorker typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, ASSEMBLER &assembler, - bool cells_first = true, - bool unique_faces_only = true) - { - DoFInfoBox dof_info(dinfo); + bool cells_first, + bool unique_faces_only = true) DEAL_II_DEPRECATED; - assembler.initialize_info(dof_info.cell, false); - for (unsigned int i=0; i::faces_per_cell; ++i) - { - assembler.initialize_info(dof_info.interior[i], true); - assembler.initialize_info(dof_info.exterior[i], true); - } + template + void loop(ITERATOR begin, + typename identity::type end, + DOFINFO &dinfo, + INFOBOX &info, + const std_cxx1x::function &cell_worker, + const std_cxx1x::function &boundary_worker, + const std_cxx1x::function &face_worker, + ASSEMBLER &assembler, + bool cells_first, + bool unique_faces_only) + { + LoopControl lctrl; + lctrl.cells_first = cells_first; + 
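// A call-side sketch of the LoopControl-based loop() entry point defined
// above; dof_handler, dof_info, info_box, the three workers and the
// assembler are placeholders for objects set up elsewhere:
MeshWorker::LoopControl lctrl;
lctrl.faces_to_ghost = MeshWorker::LoopControl::one;

MeshWorker::loop<dim, dim>
  (dof_handler.begin_active(), dof_handler.end(),
   dof_info, info_box,
   cell_worker, boundary_worker, face_worker,
   assembler, lctrl);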
lctrl.own_faces = (unique_faces_only)?LoopControl::one:LoopControl::both; - LoopControl lctrl; - lctrl.own_faces = (unique_faces_only)?LoopControl::one:LoopControl::both; - - // Loop over all cells -#ifdef DEAL_II_MESHWORKER_PARALLEL - WorkStream::run(begin, end, - std_cxx1x::bind(&cell_action, - std_cxx1x::_1, std_cxx1x::_3, std_cxx1x::_2, - cell_worker, boundary_worker, face_worker, cells_first, lctrl), - std_cxx1x::bind(&internal::assemble, std_cxx1x::_1, &assembler), - info, dof_info); -#else - for (ITERATOR cell = begin; cell != end; ++cell) - { - cell_action(cell, dof_info, - info, cell_worker, - boundary_worker, face_worker, - cells_first, - lctrl); - dof_info.assemble(assembler); - } -#endif + loop(begin, end, dinfo, info, cell_worker, boundary_worker, face_worker, assembler, lctrl); } /** @@ -456,7 +472,7 @@ namespace MeshWorker IntegrationInfo &, IntegrationInfo &)> &face_worker, ASSEMBLER &assembler, - bool cells_first = true) DEAL_II_DEPRECATED; + bool cells_first) DEAL_II_DEPRECATED; template @@ -501,7 +517,17 @@ namespace MeshWorker IntegrationInfoBox &box, const LocalIntegrator &integrator, ASSEMBLER &assembler, - bool cells_first = true) + bool cells_first) +DEAL_II_DEPRECATED; + + template + void integration_loop(ITERATOR begin, + typename identity::type end, + DoFInfo &dof_info, + IntegrationInfoBox &box, + const LocalIntegrator &integrator, + ASSEMBLER &assembler, + bool cells_first) { std_cxx1x::function&, IntegrationInfo&)> cell_worker; std_cxx1x::function&, IntegrationInfo&)> boundary_worker; @@ -526,7 +552,40 @@ namespace MeshWorker cells_first); } + /** + * As above but using LoopControl + */ + template + void integration_loop(ITERATOR begin, + typename identity::type end, + DoFInfo &dof_info, + IntegrationInfoBox &box, + const LocalIntegrator &integrator, + ASSEMBLER &assembler, + const LoopControl &lctrl = LoopControl()) + { + std_cxx1x::function&, IntegrationInfo&)> cell_worker; + std_cxx1x::function&, IntegrationInfo&)> boundary_worker; + std_cxx1x::function &, DoFInfo &, + IntegrationInfo &, + IntegrationInfo &)> face_worker; + if (integrator.use_cell) + cell_worker = std_cxx1x::bind(&LocalIntegrator::cell, &integrator, std_cxx1x::_1, std_cxx1x::_2); + if (integrator.use_boundary) + boundary_worker = std_cxx1x::bind(&LocalIntegrator::boundary, &integrator, std_cxx1x::_1, std_cxx1x::_2); + if (integrator.use_face) + face_worker = std_cxx1x::bind(&LocalIntegrator::face, &integrator, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3, std_cxx1x::_4); + loop + (begin, end, + dof_info, + box, + cell_worker, + boundary_worker, + face_worker, + assembler, + lctrl); + } } diff --git a/deal.II/include/deal.II/meshworker/simple.h b/deal.II/include/deal.II/meshworker/simple.h index 9f279b3c8b..224729da48 100644 --- a/deal.II/include/deal.II/meshworker/simple.h +++ b/deal.II/include/deal.II/meshworker/simple.h @@ -824,24 +824,13 @@ namespace MeshWorker { AssertDimension(M.m(), i1.size()); AssertDimension(M.n(), i2.size()); + Assert(mg_constrained_dofs == 0, ExcInternalError()); +//TODO: Possibly remove this function all together - if (mg_constrained_dofs == 0) - { - for (unsigned int j=0; j= threshold) - G.add(i1[j], i2[k], M(j,k)); - } - else - { - for (unsigned int j=0; j= threshold) - { - if (!mg_constrained_dofs->continuity_across_refinement_edges()) - G.add(i1[j], i2[k], M(j,k)); - } - } + for (unsigned int j=0; j= threshold) + G.add(i1[j], i2[k], M(j,k)); } @@ -868,28 +857,35 @@ namespace MeshWorker { for (unsigned int j=0; j= threshold) - if 
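// Matching sketch for the new integration_loop() overload that forwards a
// LoopControl; 'integrator' is assumed to derive from
// MeshWorker::LocalIntegrator<dim>, the remaining objects are placeholders:
MeshWorker::LoopControl lctrl;   // defaults: own cells, each face once
MeshWorker::integration_loop<dim, dim>
  (dof_handler.begin_active(), dof_handler.end(),
   dof_info, info_box, integrator, assembler, lctrl);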
(!mg_constrained_dofs->at_refinement_edge(level, i1[j]) && - !mg_constrained_dofs->at_refinement_edge(level, i2[k])) - { - if (mg_constrained_dofs->set_boundary_values()) - { - // At the boundary, only enter the term on the - // diagonal, but not the coupling terms - if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) && - !mg_constrained_dofs->is_boundary_index(level, i2[k])) - || - (mg_constrained_dofs->is_boundary_index(level, i1[j]) && - mg_constrained_dofs->is_boundary_index(level, i2[k]) && - i1[j] == i2[k])) - G.add(i1[j], i2[k], M(j,k)); - } - else - G.add(i1[j], i2[k], M(j,k)); - } - } + { + // Only enter the local values into the global matrix, + // if the value is larger than the threshold + if (std::fabs(M(j,k)) < threshold) + continue; + + // Do not enter, if either the row or the column + // corresponds to an index on the refinement edge. The + // level problems are solved with homogeneous + // Dirichlet boundary conditions, therefore we + // eliminate these rows and columns. The corresponding + // matrix entries are entered by assemble_in() and + // assemble_out(). + if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) || + mg_constrained_dofs->at_refinement_edge(level, i2[k])) + continue; + + // At the boundary, only enter the term on the + // diagonal, but not the coupling terms + if ((mg_constrained_dofs->is_boundary_index(level, i1[j]) || + mg_constrained_dofs->is_boundary_index(level, i2[k])) && + (i1[j] != i2[k])) + continue; + + G.add(i1[j], i2[k], M(j,k)); + } + } } - + template inline void @@ -1085,28 +1081,18 @@ namespace MeshWorker { if (level1 == level2) { - if (mg_constrained_dofs == 0) - { - assemble((*matrix)[level1], info1.matrix(0,false).matrix, info1.indices, info1.indices); - assemble((*matrix)[level1], info1.matrix(0,true).matrix, info1.indices, info2.indices); - assemble((*matrix)[level1], info2.matrix(0,false).matrix, info2.indices, info2.indices); - assemble((*matrix)[level1], info2.matrix(0,true).matrix, info2.indices, info1.indices); - } - else - { - assemble((*matrix)[level1], info1.matrix(0,false).matrix, info1.indices, info1.indices, level1); - assemble((*matrix)[level1], info1.matrix(0,true).matrix, info1.indices, info2.indices, level1); - assemble((*matrix)[level1], info2.matrix(0,false).matrix, info2.indices, info2.indices, level1); - assemble((*matrix)[level1], info2.matrix(0,true).matrix, info2.indices, info1.indices, level1); - } - } + assemble((*matrix)[level1], info1.matrix(0,false).matrix, info1.indices, info1.indices, level1); + assemble((*matrix)[level1], info1.matrix(0,true).matrix, info1.indices, info2.indices, level1); + assemble((*matrix)[level1], info2.matrix(0,false).matrix, info2.indices, info2.indices, level1); + assemble((*matrix)[level1], info2.matrix(0,true).matrix, info2.indices, info1.indices, level1); + } else { Assert(level1 > level2, ExcInternalError()); // Do not add info2.M1, // which is done by // the coarser cell - assemble((*matrix)[level1], info1.matrix(0,false).matrix, info1.indices, info1.indices); + assemble((*matrix)[level1], info1.matrix(0,false).matrix, info1.indices, info1.indices, level1); if (level1>0) { assemble_up((*flux_up)[level1],info1.matrix(0,true).matrix, info2.indices, info1.indices, level1); @@ -1122,20 +1108,10 @@ namespace MeshWorker if (level1 == level2) { - if (mg_constrained_dofs == 0) - { - assemble((*matrix)[level1], info1.matrix(k,false).matrix, info1.indices_by_block[row], info1.indices_by_block[column]); - assemble((*matrix)[level1], info1.matrix(k,true).matrix, 
info1.indices_by_block[row], info2.indices_by_block[column]);
-              assemble((*matrix)[level1], info2.matrix(k,false).matrix, info2.indices_by_block[row], info2.indices_by_block[column]);
-              assemble((*matrix)[level1], info2.matrix(k,true).matrix, info2.indices_by_block[row], info1.indices_by_block[column]);
-            }
-          else
-            {
-              assemble((*matrix)[level1], info1.matrix(k,false).matrix, info1.indices_by_block[row], info1.indices_by_block[column], level1);
-              assemble((*matrix)[level1], info1.matrix(k,true).matrix, info1.indices_by_block[row], info2.indices_by_block[column], level1);
-              assemble((*matrix)[level1], info2.matrix(k,false).matrix, info2.indices_by_block[row], info2.indices_by_block[column], level1);
-              assemble((*matrix)[level1], info2.matrix(k,true).matrix, info2.indices_by_block[row], info1.indices_by_block[column], level1);
-            }
+          assemble((*matrix)[level1], info1.matrix(k,false).matrix, info1.indices_by_block[row], info1.indices_by_block[column], level1);
+          assemble((*matrix)[level1], info1.matrix(k,true).matrix, info1.indices_by_block[row], info2.indices_by_block[column], level1);
+          assemble((*matrix)[level1], info2.matrix(k,false).matrix, info2.indices_by_block[row], info2.indices_by_block[column], level1);
+          assemble((*matrix)[level1], info2.matrix(k,true).matrix, info2.indices_by_block[row], info1.indices_by_block[column], level1);
        }
      else
        {
@@ -1143,7 +1119,7 @@ namespace MeshWorker
          // Do not add info2.M1,
          // which is done by
          // the coarser cell
-         assemble((*matrix)[level1], info1.matrix(k,false).matrix, info1.indices_by_block[row], info1.indices_by_block[column]);
+         assemble((*matrix)[level1], info1.matrix(k,false).matrix, info1.indices_by_block[row], info1.indices_by_block[column], level1);
          if (level1>0)
            {
              assemble_up((*flux_up)[level1],info1.matrix(k,true).matrix, info2.indices_by_block[row], info1.indices_by_block[column], level1);
diff --git a/deal.II/include/deal.II/multigrid/mg_constrained_dofs.h b/deal.II/include/deal.II/multigrid/mg_constrained_dofs.h
index 610e0344a4..9d528f1204 100644
--- a/deal.II/include/deal.II/multigrid/mg_constrained_dofs.h
+++ b/deal.II/include/deal.II/multigrid/mg_constrained_dofs.h
@@ -106,47 +106,53 @@ public:
   * Return the indices of dofs for each level that lie on the
   * boundary of the domain.
   */
-// TODO: remove
  const std::vector<std::vector<bool> > &
  get_boundary_indices () const;

  /**
-   * Return the indices of dofs for each level that lie on the
-   * boundary of the domain.
-   */
-  // TODO: remove
-  const std::vector<std::vector<bool> > &
-  get_non_refinement_edge_indices () const;
-
-  /**
+   * @deprecated Use at_refinement_edge() if possible, else
+   * get_refinement_edge_indices(unsigned int).
+   *
   * Return the indices of dofs for each level that lie on the
   * refinement edge (i.e. are on faces between cells of this level
   * and cells on the level below).
   */
-  // TODO: remove
  const std::vector<std::vector<bool> > &
-  get_refinement_edge_indices () const;
+  get_refinement_edge_indices () const DEAL_II_DEPRECATED;

  /**
+   * Return the indices of dofs on the given level that lie on a
+   * refinement edge (dofs on faces to neighbors that are coarser).
+   */
+  const IndexSet &
+  get_refinement_edge_indices (unsigned int level) const;
+
+  /**
+   * @deprecated Use at_refinement_edge_boundary() if possible, else
+   * use get_refinement_edge_boundary_indices().
+   *
   * Return the indices of dofs for each level that are in the
   * intersection of the sets returned by get_boundary_indices() and
   * get_refinement_edge_indices().
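// A usage sketch for the new per-level IndexSet interface declared above
// (preferred over the deprecated vector-of-vector<bool> variants);
// 'mg_constrained_dofs', 'level' and 'dof_index' are placeholders:
const IndexSet &edge_dofs =
  mg_constrained_dofs.get_refinement_edge_indices (level);
if (edge_dofs.is_element (dof_index))
  {
    // this dof sits on a face towards a coarser neighbor on 'level'
  }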
*/ - // TODO: remove const std::vector > & - get_refinement_edge_boundary_indices () const; + get_refinement_edge_boundary_indices () const DEAL_II_DEPRECATED; /** - * Return if boundary_indices need to be set or not. + * Return indices of all dofs that are on boundary faces on the given level + * if the cell has refinement edge indices (i.e. has a coarser neighbor). */ - - bool set_boundary_values () const; + const IndexSet & + get_refinement_edge_boundary_indices (unsigned int level) const; /** - * Return if the finite element requires continuity across - * refinement edges. + * @deprecated The function is_boundary_index() now returns false if + * no boundary values are set. + * + * Return if boundary_indices need to be set or not. */ - bool continuity_across_refinement_edges () const; + bool set_boundary_values () const DEAL_II_DEPRECATED; + private: /** @@ -154,17 +160,11 @@ private: */ std::vector > boundary_indices; - /** - * The degrees of freedom on egdges that are not a refinement edge - * between a level and coarser cells. - */ - std::vector > non_refinement_edge_indices; - /** * The degrees of freedom on the refinement edge between a level and * coarser cells. */ - std::vector > refinement_edge_indices; + std::vector refinement_edge_indices; /** * The degrees of freedom on the refinement edge between a level and @@ -172,7 +172,17 @@ private: * * This is a subset of #refinement_edge_indices. */ - std::vector > refinement_edge_boundary_indices; + std::vector refinement_edge_boundary_indices; + + /** + * old data structure only filled on demand + */ + mutable std::vector > refinement_edge_boundary_indices_old; + + /** + * old data structure only filled on demand + */ + mutable std::vector > refinement_edge_indices_old; }; @@ -184,16 +194,15 @@ MGConstrainedDoFs::initialize(const DoFHandler &dof) const unsigned int nlevels = dof.get_tria().n_global_levels(); refinement_edge_indices.resize(nlevels); refinement_edge_boundary_indices.resize(nlevels); - non_refinement_edge_indices.resize(nlevels); + refinement_edge_indices_old.clear(); + refinement_edge_boundary_indices_old.clear(); for (unsigned int l=0; l > & -MGConstrainedDoFs::get_non_refinement_edge_indices () const +const std::vector > & +MGConstrainedDoFs::get_refinement_edge_indices () const { - return non_refinement_edge_indices; + if (refinement_edge_indices_old.size()!=refinement_edge_indices.size()) + { + unsigned int n_levels = refinement_edge_indices.size(); + refinement_edge_indices_old.resize(n_levels); + for (unsigned int l=0;l > & -MGConstrainedDoFs::get_refinement_edge_indices () const +const IndexSet & +MGConstrainedDoFs::get_refinement_edge_indices (unsigned int level) const { - return refinement_edge_indices; + AssertIndexRange(level, refinement_edge_indices.size()); + return refinement_edge_indices[level]; } inline const std::vector > & MGConstrainedDoFs::get_refinement_edge_boundary_indices () const { - return refinement_edge_boundary_indices; + if (refinement_edge_boundary_indices_old.size()!=refinement_edge_boundary_indices.size()) + { + unsigned int n_levels = refinement_edge_boundary_indices.size(); + refinement_edge_boundary_indices_old.resize(n_levels); + for (unsigned int l=0;l void extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs); + std::vector &interface_dofs, + std::vector &boundary_interface_dofs); /** * Does the same as the function above, @@ -302,7 +302,16 @@ namespace MGTools template void 
extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, - std::vector > &interface_dofs); + std::vector &interface_dofs); + + /** + * As above but with a deprecated data structure. This makes one additional copy. + */ + template + void + extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, + std::vector > &interface_dofs) DEAL_II_DEPRECATED; + template void diff --git a/deal.II/include/deal.II/numerics/vector_tools.templates.h b/deal.II/include/deal.II/numerics/vector_tools.templates.h index bc7ed01a80..bd85dd0cb0 100644 --- a/deal.II/include/deal.II/numerics/vector_tools.templates.h +++ b/deal.II/include/deal.II/numerics/vector_tools.templates.h @@ -410,7 +410,7 @@ namespace VectorTools const unsigned int max_rep_points = *std::max_element(n_rep_points.begin(), n_rep_points.end()); - std::vector dofs_on_cell(fe.max_dofs_per_cell()); + std::vector< types::global_dof_index> dofs_on_cell(fe.max_dofs_per_cell()); std::vector< Point > rep_points(max_rep_points); std::vector< std::vector > function_values_scalar(fe.size()); diff --git a/deal.II/source/lac/block_sparsity_pattern.cc b/deal.II/source/lac/block_sparsity_pattern.cc index 0536414ef5..d07c32c912 100644 --- a/deal.II/source/lac/block_sparsity_pattern.cc +++ b/deal.II/source/lac/block_sparsity_pattern.cc @@ -741,6 +741,27 @@ namespace TrilinosWrappers + BlockSparsityPattern:: + BlockSparsityPattern (const std::vector &row_parallel_partitioning, + const std::vector &col_parallel_partitioning, + const std::vector &writable_rows, + const MPI_Comm &communicator) + : + BlockSparsityPatternBase + (row_parallel_partitioning.size(), + col_parallel_partitioning.size()) + { + for (size_type i=0; iblock(i,j).reinit(row_parallel_partitioning[i], + col_parallel_partitioning[j], + writable_rows[i], + communicator); + this->collect_sizes(); + } + + + void BlockSparsityPattern::reinit (const std::vector &row_block_sizes, const std::vector &col_block_sizes) diff --git a/deal.II/source/lac/trilinos_block_vector.cc b/deal.II/source/lac/trilinos_block_vector.cc index 4f21ce6136..d1b5a6f536 100644 --- a/deal.II/source/lac/trilinos_block_vector.cc +++ b/deal.II/source/lac/trilinos_block_vector.cc @@ -146,7 +146,8 @@ namespace TrilinosWrappers void BlockVector::reinit (const std::vector ¶llel_partitioning, const std::vector &ghost_values, - const MPI_Comm &communicator) + const MPI_Comm &communicator, + const bool vector_writable) { const size_type no_blocks = parallel_partitioning.size(); std::vector block_sizes (no_blocks); @@ -161,7 +162,8 @@ namespace TrilinosWrappers components.resize(n_blocks()); for (size_type i=0; iGraph())); + compress(); } @@ -475,6 +479,7 @@ namespace TrilinosWrappers { // release memory before reallocation matrix.reset(); + nonlocal_matrix.reset(); // if we want to exchange data, build a usual Trilinos sparsity pattern // and let that handle the exchange. 
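// Sketch: initializing a ghosted block vector whose ghost entries are
// writable, matching the new 'vector_writable' flag introduced above.
// The index set vectors are placeholders, one entry per block assumed:
TrilinosWrappers::MPI::BlockVector v;
v.reinit (owned_partitioning,        // std::vector<IndexSet>, owned rows
          ghost_partitioning,        // std::vector<IndexSet>, ghost rows
          MPI_COMM_WORLD,
          /*vector_writable=*/true); // allocate writable off-processor part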
otherwise, manually create a @@ -582,9 +587,10 @@ namespace TrilinosWrappers (Copy, sparsity_pattern.trilinos_sparsity_pattern(), false)); if (sparsity_pattern.nonlocal_graph.get() != 0) - { - nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *sparsity_pattern.nonlocal_graph)); - } + nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *sparsity_pattern.nonlocal_graph)); + else + nonlocal_matrix.reset (); + compress(); last_action = Zero; } @@ -598,6 +604,11 @@ namespace TrilinosWrappers matrix.reset (); matrix.reset (new Epetra_FECrsMatrix (Copy, sparse_matrix.trilinos_sparsity_pattern(), false)); + if (sparse_matrix.nonlocal_matrix != 0) + nonlocal_matrix.reset (new Epetra_CrsMatrix + (Copy, sparse_matrix.nonlocal_matrix->Graph())); + else + nonlocal_matrix.reset(); compress(); } @@ -755,6 +766,7 @@ namespace TrilinosWrappers const Epetra_CrsGraph *graph = &input_matrix.Graph(); + nonlocal_matrix.reset(); matrix.reset (); matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false)); @@ -801,8 +813,10 @@ namespace TrilinosWrappers // flush buffers int ierr; - if (nonlocal_matrix.get() != 0) + if (nonlocal_matrix.get() != 0 && mode == Add) { + // do only export in case of an add() operation, otherwise the owning + // processor must have set the correct entry nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap()); Epetra_Export exporter(nonlocal_matrix->RowMap(), matrix->RowMap()); ierr = matrix->Export(*nonlocal_matrix, exporter, mode); diff --git a/deal.II/source/lac/trilinos_sparsity_pattern.cc b/deal.II/source/lac/trilinos_sparsity_pattern.cc index 85ca1111fc..b406c7300f 100644 --- a/deal.II/source/lac/trilinos_sparsity_pattern.cc +++ b/deal.II/source/lac/trilinos_sparsity_pattern.cc @@ -544,7 +544,7 @@ namespace TrilinosWrappers if (Utilities::MPI::n_mpi_processes(communicator) > 1) { Epetra_Map nonlocal_map = - nonlocal_partitioner.make_trilinos_map(communicator, false); + nonlocal_partitioner.make_trilinos_map(communicator, true); nonlocal_graph.reset(new Epetra_CrsGraph(Copy, nonlocal_map, 0)); } else diff --git a/deal.II/source/lac/trilinos_vector.cc b/deal.II/source/lac/trilinos_vector.cc index ea330e88e3..4ff1d598e8 100644 --- a/deal.II/source/lac/trilinos_vector.cc +++ b/deal.II/source/lac/trilinos_vector.cc @@ -186,6 +186,8 @@ namespace TrilinosWrappers Vector::reinit (const Epetra_Map &input_map, const bool fast) { + nonlocal_vector.reset(); + if (vector->Map().SameAs(input_map)==false) vector.reset (new Epetra_FEVector(input_map)); else if (fast == false) @@ -205,6 +207,8 @@ namespace TrilinosWrappers const MPI_Comm &communicator, const bool fast) { + nonlocal_vector.reset(); + Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator, true); reinit (map, fast); @@ -217,13 +221,11 @@ namespace TrilinosWrappers const bool fast, const bool allow_different_maps) { - // In case we do not allow to - // have different maps, this - // call means that we have to - // reset the vector. So clear - // the vector, initialize our - // map with the map in v, and - // generate the vector. + nonlocal_vector.reset(); + + // In case we do not allow to have different maps, this call means that + // we have to reset the vector. So clear the vector, initialize our map + // with the map in v, and generate the vector. 
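// Call-side note for the SparseMatrix changes above, as a sketch: the
// nonlocal (off-processor) part is only exported into the owning rows for
// additive compress operations,
//
//   matrix.compress (dealii::VectorOperation::add);    // triggers the Export
//   matrix.compress (dealii::VectorOperation::insert); // owners hold final values
//
// so set-operations must be performed on the owning processor itself.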
if (allow_different_maps == false) { if (vector->Map().SameAs(v.vector->Map()) == false) @@ -250,14 +252,10 @@ namespace TrilinosWrappers } } - // Otherwise, we have to check - // that the two vectors are - // already of the same size, - // create an object for the data - // exchange and then insert all - // the data. The first assertion - // is only a check whether the - // user knows what she is doing. + // Otherwise, we have to check that the two vectors are already of the + // same size, create an object for the data exchange and then insert all + // the data. The first assertion is only a check whether the user knows + // what she is doing. else { Assert (fast == false, @@ -284,19 +282,16 @@ namespace TrilinosWrappers Vector::reinit (const BlockVector &v, const bool import_data) { - // In case we do not allow to - // have different maps, this - // call means that we have to - // reset the vector. So clear - // the vector, initialize our - // map with the map in v, and - // generate the vector. + nonlocal_vector.reset(); + + // In case we do not allow to have different maps, this call means that + // we have to reset the vector. So clear the vector, initialize our map + // with the map in v, and generate the vector. if (v.n_blocks() == 0) return; - // create a vector that holds all the elements - // contained in the block vector. need to - // manually create an Epetra_Map. + // create a vector that holds all the elements contained in the block + // vector. need to manually create an Epetra_Map. size_type n_elements = 0, added_elements = 0, block_offset = 0; for (size_type block=0; block 1) + { + Epetra_Map nonlocal_map = + nonlocal_entries.make_trilinos_map(communicator, true); + nonlocal_vector.reset(new Epetra_MultiVector(nonlocal_map, 1)); + } + } } Vector & Vector::operator = (const Vector &v) { - // distinguish three cases. First case: both - // vectors have the same layout (just need to - // copy the local data, not reset the memory - // and the underlying Epetra_Map). The third - // case means that we have to rebuild the - // calling vector. + // distinguish three cases. First case: both vectors have the same + // layout (just need to copy the local data, not reset the memory and + // the underlying Epetra_Map). The third case means that we have to + // rebuild the calling vector. 
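// Usage sketch tying the pieces above together; the index sets are
// placeholders and the ghost entries are assumed writable:
TrilinosWrappers::MPI::Vector w;
w.reinit (locally_owned_dofs,        // IndexSet of owned entries
          locally_relevant_dofs,     // IndexSet including ghost entries
          MPI_COMM_WORLD,
          /*vector_writable=*/true);

w (some_ghost_index) += 1.0;               // write into a ghost row
w.compress (dealii::VectorOperation::add); // export contribution to the owner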
if (vector->Map().SameAs(v.vector->Map())) { *vector = *v.vector; + if (v.nonlocal_vector.get() != 0) + nonlocal_vector.reset(new Epetra_MultiVector(v.nonlocal_vector->Map(), 1)); last_action = Zero; } // Second case: vectors have the same global @@ -389,6 +409,9 @@ namespace TrilinosWrappers has_ghosts = v.has_ghosts; } + if (v.nonlocal_vector.get() != 0) + nonlocal_vector.reset(new Epetra_MultiVector(v.nonlocal_vector->Map(), 1)); + return *this; } @@ -397,6 +420,8 @@ namespace TrilinosWrappers Vector & Vector::operator = (const TrilinosWrappers::Vector &v) { + nonlocal_vector.reset(); + Assert (size() == v.size(), ExcDimensionMismatch(size(), v.size())); Epetra_Import data_exchange (vector->Map(), v.vector->Map()); diff --git a/deal.II/source/lac/trilinos_vector_base.cc b/deal.II/source/lac/trilinos_vector_base.cc index 48b0afcd0c..95a9b1387b 100644 --- a/deal.II/source/lac/trilinos_vector_base.cc +++ b/deal.II/source/lac/trilinos_vector_base.cc @@ -21,6 +21,7 @@ # include # include +# include DEAL_II_NAMESPACE_OPEN @@ -55,13 +56,9 @@ namespace TrilinosWrappers Assert (index < vector.size(), ExcIndexRange (index, 0, vector.size())); - // Trilinos allows for vectors - // to be referenced by the [] or - // () operators but only () - // checks index bounds. We check - // these bounds by ourselves, so - // we can use []. Note that we - // can only get local values. + // Trilinos allows for vectors to be referenced by the [] or () + // operators but only () checks index bounds. We check these bounds by + // ourselves, so we can use []. Note that we can only get local values. const TrilinosWrappers::types::int_type local_index = vector.vector->Map().LID(static_cast(index)); @@ -112,9 +109,8 @@ namespace TrilinosWrappers void VectorBase::clear () { - // When we clear the vector, - // reset the pointer and generate - // an empty vector. + // When we clear the vector, reset the pointer and generate an empty + // vector. #ifdef DEAL_II_WITH_MPI Epetra_Map map (0, 0, Epetra_MpiComm(MPI_COMM_SELF)); #else @@ -186,18 +182,67 @@ namespace TrilinosWrappers + void + VectorBase::compress (::dealii::VectorOperation::values given_last_action) + { + //Select which mode to send to Trilinos. Note that we use last_action if + //available and ignore what the user tells us to detect wrongly mixed + //operations. Typically given_last_action is only used on machines that do + //not execute an operation (because they have no own cells for example). + Epetra_CombineMode mode = last_action; + if (last_action == Zero) + { + if (given_last_action==::dealii::VectorOperation::add) + mode = Add; + else if (given_last_action==::dealii::VectorOperation::insert) + mode = Insert; + } + +#ifdef DEBUG +# ifdef DEAL_II_WITH_MPI + // check that every process has decided to use the same mode. This will + // otherwise result in undefined behaviour in the call to + // GlobalAssemble(). + double double_mode = mode; + Utilities::MPI::MinMaxAvg result + = Utilities::MPI::min_max_avg (double_mode, + dynamic_cast + (&vector_partitioner().Comm())->GetMpiComm()); + Assert(result.max-result.min<1e-5, + ExcMessage ("Not all processors agree whether the last operation on " + "this vector was an addition or a set operation. This will " + "prevent the compress() operation from succeeding.")); + +# endif +#endif + + // Now pass over the information about what we did last to the vector. 
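// (Caller-side contract of this function, sketched: every MPI rank must
// request the same operation, including ranks that wrote nothing, e.g.
//
//   v.compress (dealii::VectorOperation::add);
//
// ranks without local changes still enter with last_action == Zero and fall
// back to the argument, which is what the min/max agreement check below
// verifies in debug mode.)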
+ int ierr = 0; + if (nonlocal_vector.get() == 0 || mode != Add) + ierr = vector->GlobalAssemble(mode); + else + { + Epetra_Export exporter(nonlocal_vector->Map(), vector->Map()); + ierr = vector->Export(*nonlocal_vector, exporter, mode); + nonlocal_vector->PutScalar(0.); + } + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + last_action = Zero; + + compressed = true; + } + + + TrilinosScalar VectorBase::el (const size_type index) const { - // Extract local indices in - // the vector. + // Extract local indices in the vector. TrilinosWrappers::types::int_type trilinos_i = vector->Map().LID(static_cast(index)); TrilinosScalar value = 0.; - // If the element is not - // present on the current - // processor, we can't + // If the element is not present on the current processor, we can't // continue. Just print out 0. // TODO: Is this reasonable? @@ -218,17 +263,13 @@ namespace TrilinosWrappers TrilinosScalar VectorBase::operator () (const size_type index) const { - // Extract local indices in - // the vector. + // Extract local indices in the vector. TrilinosWrappers::types::int_type trilinos_i = vector->Map().LID(static_cast(index)); TrilinosScalar value = 0.; - // If the element is not present - // on the current processor, we - // can't continue. This is the - // main difference to the el() - // function. + // If the element is not present on the current processor, we can't + // continue. This is the main difference to the el() function. if (trilinos_i == -1 ) { Assert (false, ExcAccessToNonlocalElement(index, local_range().first, diff --git a/deal.II/source/multigrid/mg_tools.cc b/deal.II/source/multigrid/mg_tools.cc index a9da2451be..85bf569a4f 100644 --- a/deal.II/source/multigrid/mg_tools.cc +++ b/deal.II/source/multigrid/mg_tools.cc @@ -1213,7 +1213,7 @@ namespace MGTools for (; cell!=endc; ++cell) { if (dof.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id - && cell->level_subdomain_id()!=dof.get_tria().locally_owned_subdomain()) + && cell->level_subdomain_id()==numbers::artificial_subdomain_id) continue; const FiniteElement &fe = cell->get_fe(); const unsigned int level = cell->level(); @@ -1247,7 +1247,7 @@ namespace MGTools endc = dof.end(); for (; cell!=endc; ++cell) if (dof.get_tria().locally_owned_subdomain()==numbers::invalid_subdomain_id - || cell->level_subdomain_id()==dof.get_tria().locally_owned_subdomain()) + || cell->level_subdomain_id()!=numbers::artificial_subdomain_id) for (unsigned int face_no = 0; face_no < GeometryInfo::faces_per_cell; ++face_no) { @@ -1416,7 +1416,7 @@ namespace MGTools template void extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, - std::vector > &interface_dofs) + std::vector &interface_dofs) { Assert (interface_dofs.size() == mg_dof_handler.get_tria().n_global_levels(), ExcDimensionMismatch (interface_dofs.size(), @@ -1427,10 +1427,7 @@ namespace MGTools Assert (interface_dofs[l].size() == mg_dof_handler.n_dofs(l), ExcDimensionMismatch (interface_dofs[l].size(), mg_dof_handler.n_dofs(l))); - - std::fill (interface_dofs[l].begin(), - interface_dofs[l].end(), - false); + interface_dofs[l].clear(); } const FiniteElement &fe = mg_dof_handler.get_fe(); @@ -1475,7 +1472,29 @@ namespace MGTools for (unsigned int i=0; i + void + extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, + std::vector > &interface_dofs) + { + std::vector temp; + temp.resize(interface_dofs.size()); + for (unsigned int l=0;l void extract_non_interface_dofs (const DoFHandler &mg_dof_handler, - std::vector > &non_interface_dofs) + 
std::vector > &non_interface_dofs) { Assert (non_interface_dofs.size() == mg_dof_handler.get_tria().n_global_levels(), ExcDimensionMismatch (non_interface_dofs.size(), @@ -1552,8 +1571,8 @@ namespace MGTools template void extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs) + std::vector &interface_dofs, + std::vector &boundary_interface_dofs) { Assert (interface_dofs.size() == mg_dof_handler.get_tria().n_global_levels(), ExcDimensionMismatch (interface_dofs.size(), @@ -1564,19 +1583,8 @@ namespace MGTools for (unsigned int l=0; l &fe = mg_dof_handler.get_fe(); @@ -1639,10 +1647,10 @@ namespace MGTools for (unsigned int i=0; i &mg_dof_handler, - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs); + std::vector &interface_dofs, + std::vector &boundary_interface_dofs); + template + void + extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, + std::vector &interface_dofs); template void extract_inner_interface_dofs (const DoFHandler &mg_dof_handler, std::vector > &interface_dofs); + template void extract_non_interface_dofs (const DoFHandler & mg_dof_handler, diff --git a/deal.II/source/multigrid/mg_transfer_block.cc b/deal.II/source/multigrid/mg_transfer_block.cc index d8dcd64ed1..35442b79a0 100644 --- a/deal.II/source/multigrid/mg_transfer_block.cc +++ b/deal.II/source/multigrid/mg_transfer_block.cc @@ -442,7 +442,7 @@ void MGTransferBlockBase::build_matrices ( constraints_per_block[block].resize(0); constraints_per_block[block].resize(n_dofs, 0); for (types::global_dof_index i=0; i
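// A usage sketch for the IndexSet-based interface of
// MGTools::extract_inner_interface_dofs introduced in this patch; the
// caller sizes one IndexSet per level, as the assertions above require
// ('dof_handler' is a placeholder):
std::vector<IndexSet> interface_dofs (dof_handler.get_tria().n_global_levels());
for (unsigned int l = 0; l < interface_dofs.size(); ++l)
  interface_dofs[l] = IndexSet (dof_handler.n_dofs(l));

MGTools::extract_inner_interface_dofs (dof_handler, interface_dofs);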