From: turcksin Date: Tue, 28 May 2013 20:43:53 +0000 (+0000) Subject: Merge from mainline. X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=bd70335a35afe3112272fd21f4694ed9e452b445;p=dealii-svn.git Merge from mainline. git-svn-id: https://svn.dealii.org/branches/branch_bigger_global_dof_indices_4@29663 0785d39b-7218-0410-832d-ea1e28bc413d --- bd70335a35afe3112272fd21f4694ed9e452b445 diff --cc deal.II/include/deal.II/fe/fe.h index 7a671ccbea,fbf458b9fe..628bb2699a --- a/deal.II/include/deal.II/fe/fe.h +++ b/deal.II/include/deal.II/fe/fe.h @@@ -1501,35 -1057,22 +1057,22 @@@ public face_system_to_base_index (const unsigned int index) const; /** - * Given a base element number, - * return the first block of a - * BlockVector it would generate. + * Given a base element number, return the first block of a BlockVector it + * would generate. */ - unsigned int first_block_of_base (const unsigned int b) const; + types::global_dof_index first_block_of_base (const unsigned int b) const; /** - * For each vector component, - * return which base - * element implements this - * component and which vector - * component in this base element - * this is. This information is - * only of interest for - * vector-valued finite elements - * which are composed of several - * sub-elements. In that case, - * one may want to obtain - * information about the element - * implementing a certain vector - * component, which can be done - * using this function and the - * FESystem::base_element() - * function. + * For each vector component, return which base element implements this + * component and which vector component in this base element this is. This + * information is only of interest for vector-valued finite elements which + * are composed of several sub-elements. In that case, one may want to + * obtain information about the element implementing a certain vector + * component, which can be done using this function and the + * FESystem::base_element() function. * - * If this is a scalar finite - * element, then the return value - * is always equal to a pair of - * zeros. + * If this is a scalar finite element, then the return value is always equal + * to a pair of zeros. */ std::pair component_to_base_index (const unsigned int component) const; @@@ -1544,11 -1086,9 +1086,9 @@@ block_to_base_index (const unsigned int block) const; /** - * The vector block and the index - * inside the block for this - * shape function. + * The vector block and the index inside the block for this shape function. */ - std::pair + std::pair system_to_block_index (const unsigned int component) const; /** diff --cc deal.II/include/deal.II/lac/block_vector_base.h index 81acc9db74,9a2344c042..adeb3f2142 --- a/deal.II/include/deal.II/lac/block_vector_base.h +++ b/deal.II/include/deal.II/lac/block_vector_base.h @@@ -835,8 -844,28 +848,28 @@@ public * is the sum of the dimensions of all * components. */ - unsigned int size () const; + std::size_t size () const; + /** + * Return an index set that describes which elements of this vector + * are owned by the current processor. Note that this index set does + * not include elements this vector may store locally as ghost + * elements but that are in fact owned by another processor. + * As a consequence, the index sets returned on different + * processors if this is a distributed vector will form disjoint + * sets that add up to the complete index set. 
+ * Obviously, if a vector is created on only one processor, then + * the result would satisfy + * @code + * vec.locally_owned_elements() == complete_index_set (vec.size()) + * @endcode + * + * For block vectors, this function returns the union of the + * locally owned elements of the individual blocks, shifted by + * their respective index offsets. + */ + IndexSet locally_owned_elements () const; + /** * Return an iterator pointing to * the first element. diff --cc deal.II/include/deal.II/lac/chunk_sparse_matrix.templates.h index d6fad7a4d6,bbc1fb9944..e696bcd1ae --- a/deal.II/include/deal.II/lac/chunk_sparse_matrix.templates.h +++ b/deal.II/include/deal.II/lac/chunk_sparse_matrix.templates.h @@@ -192,26 -185,26 +192,28 @@@ namespace interna const unsigned int end_row, const number *values, const std::size_t *rowstart, -- const unsigned int *colnums, ++ const size_type *colnums, const InVector &src, OutVector &dst) { -- const unsigned int m = cols.n_rows(); -- const unsigned int n = cols.n_cols(); -- const unsigned int chunk_size = cols.get_chunk_size(); ++ const size_type m = cols.n_rows(); ++ const size_type n = cols.n_cols(); ++ const size_type chunk_size = cols.get_chunk_size(); // loop over all chunks. note that we need to treat the last chunk row // and column differently if they have padding elements -- const unsigned int n_filled_last_rows = m % chunk_size; -- const unsigned int n_filled_last_cols = n % chunk_size; ++ const size_type n_filled_last_rows = m % chunk_size; ++ const size_type n_filled_last_cols = n % chunk_size; -- const unsigned int last_regular_row = n_filled_last_rows > 0 ? -- std::min(m/chunk_size, end_row) : end_row; -- const unsigned int irregular_col = n/chunk_size; ++ const size_type last_regular_row = n_filled_last_rows > 0 ? ++ std::min(m/chunk_size, ++ static_cast(end_row)) : ++ end_row; ++ const size_type irregular_col = n/chunk_size; typename OutVector::iterator dst_ptr = dst.begin()+chunk_size*begin_row; const number *val_ptr= &values[rowstart[begin_row]*chunk_size*chunk_size]; -- const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]]; ++ const size_type *colnum_ptr = &colnums[rowstart[begin_row]]; for (unsigned int chunk_row=begin_row; chunk_row 0 && end_row == (m/chunk_size+1)) { -- const unsigned int chunk_row = last_regular_row; ++ const size_type chunk_row = last_regular_row; const number *const val_end_of_row = &values[rowstart[chunk_row+1] * chunk_size * chunk_size]; @@@ -250,16 -243,16 +252,16 @@@ if (*colnum_ptr != irregular_col) { // we're at a chunk row but not column that has padding -- for (unsigned int r=0; r - void set_zero_parallel(const dealii::ConstraintMatrix &cm, parallel::distributed::Vector &vec, unsigned int shift = 0) ++ void set_zero_parallel(const dealii::ConstraintMatrix &cm, parallel::distributed::Vector &vec, size_type shift = 0) + { + for (unsigned int i=0; i void set_zero_in_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, internal::bool2type) { diff --cc deal.II/include/deal.II/lac/parallel_block_vector.h index c221a6858a,63b2395979..d42f572fcb --- a/deal.II/include/deal.II/lac/parallel_block_vector.h +++ b/deal.II/include/deal.II/lac/parallel_block_vector.h @@@ -82,31 -77,21 +77,21 @@@ namespace paralle typedef typename BaseClass::const_iterator const_iterator; /** - * Constructor. There are three - * ways to use this - * constructor. First, without - * any arguments, it generates - * an object with no - * blocks. 
Given one argument, - * it initializes num_blocks - * blocks, but these blocks have - * size zero. The third variant - * finally initializes all - * blocks to the same size - * block_size. + * Constructor. There are three ways to use this constructor. First, + * without any arguments, it generates an object with no blocks. Given + * one argument, it initializes num_blocks blocks, but these + * blocks have size zero. The third variant finally initializes all + * blocks to the same size block_size. * - * Confer the other constructor - * further down if you intend to - * use blocks of different - * sizes. + * Confer the other constructor further down if you intend to use + * blocks of different sizes. */ - explicit BlockVector (const unsigned int num_blocks = 0, - const unsigned int block_size = 0); + explicit BlockVector (const size_type num_blocks = 0, + const size_type block_size = 0); /** - * Copy-Constructor. Dimension set to - * that of V, all components are copied - * from V + * Copy-Constructor. Dimension set to that of V, all components are + * copied from V */ BlockVector (const BlockVector &V); @@@ -138,17 -115,13 +115,13 @@@ #endif /** - * Constructor. Set the number of - * blocks to - * block_sizes.size() and - * initialize each block with - * block_sizes[i] zero - * elements. + * Constructor. Set the number of blocks to block_sizes.size() + * and initialize each block with block_sizes[i] zero elements. */ - BlockVector (const std::vector &block_sizes); + BlockVector (const std::vector &block_sizes); /** - * Destructor. Clears memory + * Destructor. Clears memory. */ ~BlockVector (); @@@ -184,88 -153,55 +153,55 @@@ operator= (const Vector &V); /** - * Reinitialize the BlockVector to - * contain num_blocks blocks of + * Reinitialize the BlockVector to contain num_blocks blocks of * size block_size each. * - * If the second argument is left - * at its default value, then the - * block vector allocates the - * specified number of blocks but - * leaves them at zero size. You - * then need to later - * reinitialize the individual - * blocks, and call - * collect_sizes() to update the - * block system's knowledge of + * If the second argument is left at its default value, then the block + * vector allocates the specified number of blocks but leaves them at + * zero size. You then need to later reinitialize the individual blocks, + * and call collect_sizes() to update the block system's knowledge of * its individual block's sizes. * - * If fast==false, the vector - * is filled with zeros. + * If fast==false, the vector is filled with zeros. */ - void reinit (const unsigned int num_blocks, - const unsigned int block_size = 0, + void reinit (const size_type num_blocks, + const size_type block_size = 0, const bool fast = false); /** - * Reinitialize the BlockVector such that - * it contains - * block_sizes.size() - * blocks. Each block is reinitialized to + * Reinitialize the BlockVector such that it contains + * block_sizes.size() blocks. Each block is reinitialized to * dimension block_sizes[i]. * - * If the number of blocks is the - * same as before this function - * was called, all vectors remain - * the same and reinit() is - * called for each vector. + * If the number of blocks is the same as before this function was + * called, all vectors remain the same and reinit() is called for each + * vector. * - * If fast==false, the vector - * is filled with zeros. + * If fast==false, the vector is filled with zeros. 
* - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. + * Note that you must call this (or the other reinit() functions) + * function, rather than calling the reinit() functions of an individual + * block, to allow the block vector to update its caches of vector + * sizes. If you call reinit() on one of the blocks, then subsequent + * actions on this object may yield unpredictable results since they may + * be routed to the wrong block. */ - void reinit (const std::vector &N, - const bool fast=false); + void reinit (const std::vector &N, + const bool fast=false); /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. + * Change the dimension to that of the vector V. The same + * applies as for the other reinit() function. * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). + * The elements of V are not copied, i.e. this function is the + * same as calling reinit (V.size(), fast). * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() of one of the - * blocks, then subsequent - * actions of this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. + * Note that you must call this (or the other reinit() functions) + * function, rather than calling the reinit() functions of an individual + * block, to allow the block vector to update its caches of vector + * sizes. If you call reinit() of one of the blocks, then subsequent + * actions of this object may yield unpredictable results since they may + * be routed to the wrong block. */ template void reinit (const BlockVector &V, diff --cc deal.II/include/deal.II/lac/parallel_vector.h index ac46606f6d,c752a9a6c7..1d979bced1 --- a/deal.II/include/deal.II/lac/parallel_vector.h +++ b/deal.II/include/deal.II/lac/parallel_vector.h @@@ -97,9 -97,22 +97,22 @@@ namespace paralle typedef const value_type *const_iterator; typedef value_type &reference; typedef const value_type &const_reference; - typedef size_t size_type; + typedef types::global_dof_index size_type; typedef typename numbers::NumberTraits::real_type real_type; + /** + * A variable that indicates whether this vector + * supports distributed data storage. If true, then + * this vector also needs an appropriate compress() + * function that allows communicating recent set or + * add operations to individual elements to be communicated + * to other processors. + * + * For the current class, the variable equals + * true, since it does support parallel data storage. + */ + static const bool supports_distributed_data = true; + /** * @name 1: Basic Object-handling */ @@@ -116,29 -128,22 +128,22 @@@ Vector (const Vector &in_vector); /** - * Constructs a parallel vector of the given - * global size without any actual parallel - * distribution. + * Constructs a parallel vector of the given global size without any + * actual parallel distribution. 
*/ - Vector (const unsigned int size); + Vector (const size_type size); /** - * Constructs a parallel vector. The local - * range is specified by @p locally_owned_set - * (note that this must be a contiguous - * interval, multiple intervals are not - * possible). The IndexSet @p ghost_indices - * specifies ghost indices, i.e., indices - * which one might need to read data from or - * accumulate data from. It is allowed that - * the set of ghost indices also contains the - * local range, but it does not need to. + * Constructs a parallel vector. The local range is specified by @p + * locally_owned_set (note that this must be a contiguous interval, + * multiple intervals are not possible). The IndexSet @p ghost_indices + * specifies ghost indices, i.e., indices which one might need to read + * data from or accumulate data from. It is allowed that the set of + * ghost indices also contains the local range, but it does not need to. * - * This function involves global - * communication, so it should only be called - * once for a given layout. Use the - * constructor with Vector argument to - * create additional vectors with the same + * This function involves global communication, so it should only be + * called once for a given layout. Use the constructor with + * Vector argument to create additional vectors with the same * parallel layout. */ Vector (const IndexSet &local_range, @@@ -161,25 -164,20 +164,20 @@@ ~Vector (); /** - * Sets the global size of the vector to @p - * size without any actual parallel - * distribution. + * Sets the global size of the vector to @p size without any actual + * parallel distribution. */ - void reinit (const unsigned int size, - const bool fast = false); + void reinit (const size_type size, + const bool fast = false); /** - * Uses the parallel layout of the input - * vector @p in_vector and allocates memory - * for this vector. Recommended initialization - * function when several vectors with the same - * layout should be created. + * Uses the parallel layout of the input vector @p in_vector and + * allocates memory for this vector. Recommended initialization function + * when several vectors with the same layout should be created. * - * If the flag @p fast is set to false, the - * memory will be initialized with zero, - * otherwise the memory will be untouched (and - * the user must make sure to fill it with - * reasonable data before using it). + * If the flag @p fast is set to false, the memory will be initialized + * with zero, otherwise the memory will be untouched (and the user must + * make sure to fill it with reasonable data before using it). */ template void reinit(const Vector &in_vector, @@@ -483,47 -434,64 +434,64 @@@ real_type linfty_norm () const; /** - * Returns the global size of the vector, - * equal to the sum of the number of locally - * owned indices among all the processors. + * Returns the global size of the vector, equal to the sum of the number + * of locally owned indices among all the processors. */ - types::global_dof_index size () const; + size_type size () const; /** - * Returns the local size of the vector, i.e., - * the number of indices owned locally. + * Returns the local size of the vector, i.e., the number of indices + * owned locally. */ - unsigned int local_size() const; + size_type local_size() const; /** - * Returns the half-open interval that - * specifies the locally owned range of the - * vector. 
Note that local_size() == - * local_range().second - + * Returns the half-open interval that specifies the locally owned range + * of the vector. Note that local_size() == local_range().second - * local_range().first. */ - std::pair local_range () const; + std::pair local_range () const; /** - * Returns true if the given global index is - * in the local range of this processor. + * Returns true if the given global index is in the local range of this + * processor. */ - bool in_local_range (const types::global_dof_index global_index) const; + bool in_local_range (const size_type global_index) const; /** - * Returns the number of ghost elements - * present on the vector. + * Return an index set that describes which elements of this vector + * are owned by the current processor. Note that this index set does + * not include elements this vector may store locally as ghost + * elements but that are in fact owned by another processor. + * As a consequence, the index sets returned on different + * processors if this is a distributed vector will form disjoint + * sets that add up to the complete index set. + * Obviously, if a vector is created on only one processor, then + * the result would satisfy + * @code + * vec.locally_owned_elements() == complete_index_set (vec.size()) + * @endcode + */ + IndexSet locally_owned_elements () const; + + /** + * Returns the number of ghost elements present on the vector. */ - unsigned int n_ghost_entries () const; + size_type n_ghost_entries () const; /** - * Returns whether the given global index is a - * ghost index on the present - * processor. Returns false for indices that - * are owned locally and for indices not - * present at all. + * Return an index set that describes which elements of this vector are + * not owned by the current processor but can be written into or read + * from locally (ghost elements). */ - bool is_ghost_entry (const size_type global_index) const; + const IndexSet& ghost_elements() const; + + /** + * Returns whether the given global index is a ghost index on the + * present processor. Returns false for indices that are owned locally + * and for indices not present at all. + */ + bool is_ghost_entry (const types::global_dof_index global_index) const; /** * Make the @p Vector class a bit like the vector<> class of @@@ -560,71 -528,64 +528,64 @@@ //@{ /** - * Read access to the data in the - * position corresponding to @p - * global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. + * Read access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. + * + * Performance: O(1) for locally owned elements that represent + * a contiguous range and O(log(nranges)) for ghost + * elements (quite fast, but slower than local_element()). */ - Number operator () (const types::global_dof_index global_index) const; + Number operator () (const size_type global_index) const; /** - * Read and write access to the data - * in the position corresponding to - * @p global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. + * Read and write access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. 
+ * + * Performance: O(1) for locally owned elements that represent + * a contiguous range and O(log(nranges)) for ghost + * elements (quite fast, but slower than local_element()). */ - Number &operator () (const types::global_dof_index global_index); + Number &operator () (const size_type global_index); /** - * Read access to the data in the - * position corresponding to @p - * global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. + * Read access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. * - * This function does the same thing - * as operator(). + * This function does the same thing as operator(). */ - Number operator [] (const types::global_dof_index global_index) const; + Number operator [] (const size_type global_index) const; /** - * Read and write access to the data - * in the position corresponding to - * @p global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. + * Read and write access to the data in the position corresponding to @p + * global_index. The index must be either in the local range of the + * vector or be specified as a ghost index at construction. * - * This function does the same thing - * as operator(). + * This function does the same thing as operator(). */ - Number &operator [] (const types::global_dof_index global_index); + Number &operator [] (const size_type global_index); /** - * Read access to the data field specified by - * @p local_index. Locally owned indices can - * be accessed with indices - * [0,local_size), and ghost - * indices with indices - * [local_size,local_size+ - * n_ghost_entries]. + * Read access to the data field specified by @p local_index. Locally + * owned indices can be accessed with indices + * [0,local_size), and ghost indices with indices + * [local_size,local_size+ n_ghost_entries]. + * + * Performance: Direct array access (fast). */ - Number local_element (const unsigned int local_index) const; + Number local_element (const size_type local_index) const; /** - * Read and write access to the data field - * specified by @p local_index. Locally owned - * indices can be accessed with indices - * [0,local_size), and ghost - * indices with indices + * Read and write access to the data field specified by @p + * local_index. Locally owned indices can be accessed with indices + * [0,local_size), and ghost indices with indices * [local_size,local_size+n_ghosts]. + * + * Performance: Direct array access (fast). */ - Number &local_element (const unsigned int local_index); + Number &local_element (const size_type local_index); //@} @@@ -646,40 -605,29 +605,29 @@@ Vector &operator -= (const Vector &V); /** - * A collective add operation: - * This funnction adds a whole - * set of values stored in @p - * values to the vector - * components specified by @p - * indices. + * A collective add operation: This funnction adds a whole set of values + * stored in @p values to the vector components specified by @p indices. */ template - void add (const std::vector &indices, + void add (const std::vector &indices, const std::vector &values); /** - * This is a second collective - * add operation. As a - * difference, this function - * takes a deal.II vector of - * values. + * This is a second collective add operation. 
As a difference, this + * function takes a deal.II vector of values. */ template - void add (const std::vector &indices, + void add (const std::vector &indices, const ::dealii::Vector &values); /** - * Take an address where - * n_elements are stored - * contiguously and add them into - * the vector. Handles all cases - * which are not covered by the - * other two add() - * functions above. + * Take an address where n_elements are stored contiguously and + * add them into the vector. Handles all cases which are not covered by + * the other two add() functions above. */ template - void add (const unsigned int n_elements, - const unsigned int *indices, + void add (const size_type n_elements, + const size_type *indices, const OtherNumber *values); /** @@@ -891,14 -854,12 +854,12 @@@ std_cxx1x::shared_ptr partitioner; /** - * The size that is currently allocated in the - * val array. + * The size that is currently allocated in the val array. */ - unsigned int allocated_size; + size_type allocated_size; /** - * Pointer to the array of - * local elements of this vector. + * Pointer to the array of local elements of this vector. */ Number *val; @@@ -958,16 -908,19 +908,19 @@@ void clear_mpi_requests (); /** - * A helper function that is used to resize - * the val array. + * A helper function that is used to resize the val array. */ - void resize_val (const unsigned int new_allocated_size); + void resize_val (const size_type new_allocated_size); /* - * Make all other vector types - * friends. + * Make all other vector types friends. */ template friend class Vector; + + /** + * Make BlockVector type friends. + */ + template friend class BlockVector; }; /*@}*/ @@@ -1411,9 -1453,24 +1456,24 @@@ + template + inline + IndexSet + Vector::locally_owned_elements() const + { + IndexSet is (size()); + + const std::pair x = local_range(); + is.add_range (x.first, x.second); + + return is; + } + + + template inline - unsigned int + typename Vector::size_type Vector::n_ghost_entries () const { return partitioner->n_ghost_indices(); diff --cc deal.II/include/deal.II/lac/parallel_vector.templates.h index 30cfce14f3,f95d78697c..c78dee5259 --- a/deal.II/include/deal.II/lac/parallel_vector.templates.h +++ b/deal.II/include/deal.II/lac/parallel_vector.templates.h @@@ -294,14 -305,15 +305,15 @@@ namespace paralle // make this function thread safe Threads::Mutex::ScopedLock lock (mutex); - const unsigned int n_import_targets = part.import_targets().size(); - const unsigned int n_ghost_targets = part.ghost_targets().size(); + const size_type n_import_targets = part.import_targets().size(); + const size_type n_ghost_targets = part.ghost_targets().size(); - AssertDimension (n_ghost_targets+n_import_targets, - compress_requests.size()); + if (operation != dealii::VectorOperation::insert) + AssertDimension (n_ghost_targets+n_import_targets, + compress_requests.size()); // first wait for the receive to complete - if (n_import_targets > 0) + if (compress_requests.size() > 0 && n_import_targets > 0) { int ierr; ierr = MPI_Waitall (n_import_targets, &compress_requests[0], @@@ -309,20 -321,26 +321,26 @@@ Assert (ierr == MPI_SUCCESS, ExcInternalError()); Number *read_position = import_data; - std::vector >::const_iterator + std::vector >::const_iterator my_imports = part.import_indices().begin(); - // If add_ghost_data is set, add the imported - // data to the local values. If not, set the - // vector entries. - if (add_ghost_data == true) + // If the operation is no insertion, add the imported data to the + // local values. 
For insert, nothing is done here (but in debug mode + // we assert that the specified value is either zero or matches with + // the ones already present + if (operation != dealii::VectorOperation::insert) for ( ; my_imports!=part.import_indices().end(); ++my_imports) - for (unsigned int j=my_imports->first; jsecond; j++) + for (size_type j=my_imports->first; jsecond; j++) local_element(j) += *read_position++; else for ( ; my_imports!=part.import_indices().end(); ++my_imports) - for (size_type j=my_imports->first; jsecond; j++) - local_element(j) = *read_position++; - for (unsigned int j=my_imports->first; jsecond; ++ for (size_type j=my_imports->first; jsecond; + j++, read_position++) + Assert(*read_position == 0. || + std::abs(local_element(j) - *read_position) < + std::abs(local_element(j)) * 100. * + std::numeric_limits::epsilon(), + ExcMessage("Inserted elements do not match.")); AssertDimension(read_position-import_data,part.n_import_indices()); } diff --cc deal.II/include/deal.II/lac/petsc_parallel_vector.h index 31a064bc0c,0ace189356..1ee6f87ae4 --- a/deal.II/include/deal.II/lac/petsc_parallel_vector.h +++ b/deal.II/include/deal.II/lac/petsc_parallel_vector.h @@@ -153,12 -153,20 +153,25 @@@ namespace PETScWrapper class Vector : public VectorBase { public: + /** + * Declare type for container size. + */ + typedef types::global_dof_index size_type; + /** + * A variable that indicates whether this vector + * supports distributed data storage. If true, then + * this vector also needs an appropriate compress() + * function that allows communicating recent set or + * add operations to individual elements to be communicated + * to other processors. + * + * For the current class, the variable equals + * true, since it does support parallel data storage. + */ + static const bool supports_distributed_data = true; + + /** * Default constructor. Initialize the * vector as empty. */ diff --cc deal.II/include/deal.II/lac/petsc_vector.h index 9f4775dbc8,7193166e27..0ee5ca8162 --- a/deal.II/include/deal.II/lac/petsc_vector.h +++ b/deal.II/include/deal.II/lac/petsc_vector.h @@@ -47,11 -47,21 +47,26 @@@ namespace PETScWrapper class Vector : public VectorBase { public: + /** + * Declare type for container size. + */ + typedef types::global_dof_index size_type; + + /** + * A variable that indicates whether this vector + * supports distributed data storage. If true, then + * this vector also needs an appropriate compress() + * function that allows communicating recent set or + * add operations to individual elements to be communicated + * to other processors. + * + * For the current class, the variable equals + * false, since it does not support parallel data storage. + * If you do need parallel data storage, use + * PETScWrappers::MPI::Vector. + */ + static const bool supports_distributed_data = false; + /** * Default constructor. Initialize the * vector as empty. diff --cc deal.II/include/deal.II/lac/petsc_vector_base.h index 1942207ffd,4222be10e1..630bd2a493 --- a/deal.II/include/deal.II/lac/petsc_vector_base.h +++ b/deal.II/include/deal.II/lac/petsc_vector_base.h @@@ -398,8 -390,24 +398,24 @@@ namespace PETScWrapper * in the local range or not, * see also local_range(). */ - bool in_local_range (const unsigned int index) const; + bool in_local_range (const size_type index) const; + /** + * Return an index set that describes which elements of this vector + * are owned by the current processor. 
Note that this index set does + * not include elements this vector may store locally as ghost + * elements but that are in fact owned by another processor. + * As a consequence, the index sets returned on different + * processors if this is a distributed vector will form disjoint + * sets that add up to the complete index set. + * Obviously, if a vector is created on only one processor, then + * the result would satisfy + * @code + * vec.locally_owned_elements() == complete_index_set (vec.size()) + * @endcode + */ + IndexSet locally_owned_elements () const; + /** * Return if the vector contains ghost * elements. @@@ -1131,10 -1139,25 +1147,25 @@@ &begin, &end); AssertThrow (ierr == 0, ExcPETScError(ierr)); - return ((index >= static_cast(begin)) && - (index < static_cast(end))); + return ((index >= static_cast(begin)) && + (index < static_cast(end))); } + + inline + IndexSet + VectorBase::locally_owned_elements() const + { + IndexSet is (size()); + + // PETSc only allows for contiguous local ranges, so this is simple + const std::pair x = local_range(); + is.add_range (x.first, x.second); + return is; + } + + + inline bool VectorBase::has_ghost_elements() const diff --cc deal.II/include/deal.II/lac/precondition.h index d95e2ac92a,6f78cf440b..f1964bbe6f --- a/deal.II/include/deal.II/lac/precondition.h +++ b/deal.II/include/deal.II/lac/precondition.h @@@ -900,15 -898,9 +906,14 @@@ template void solve (const PETScWrappers::MatrixBase &A, - std::vector &r_eigenvalues, - std::vector &r_eigenvectors = std::vector (), - const size_type n_eigenvectors = 1); + std::vector &eigenvalues, + std::vector &eigenvectors, - const unsigned int n_eigenpairs = 1); ++ const size_type n_eigenpairs = 1); /** * Same as above, but here a composite method for solving the @@@ -167,9 -164,24 +168,24 @@@ void solve (const PETScWrappers::MatrixBase &A, const PETScWrappers::MatrixBase &B, - std::vector &r_eigenvalues, - std::vector &r_eigenvectors = std::vector (), - const size_type n_eigenvectors = 1); + std::vector &eigenvalues, + std::vector &eigenvectors, - const unsigned int n_eigenpairs = 1); ++ const size_type n_eigenpairs = 1); + + /** + * Same as above, but here a composite method for solving the + * system $A x=\lambda B x$ with real matrices $A, B$ and + * imaginary eigenpairs $x, \lamda$. + */ + template + void + solve (const PETScWrappers::MatrixBase &A, + const PETScWrappers::MatrixBase &B, + std::vector &real_eigenvalues, + std::vector &imag_eigenvalues, + std::vector &real_eigenvectors, + std::vector &imag_eigenvectors, + const unsigned int n_eigenpairs = 1); /** * Set the initial vector for the solver. @@@ -715,74 -739,126 +743,126 @@@ * This is declared here to make it possible to take a std::vector * of different PETScWrappers vector types */ + // todo: The logic of these functions can be simplified without breaking backward compatibility... template - void - SolverBase::solve (const PETScWrappers::MatrixBase &A, - std::vector &kr, - std::vector &vr, - const size_type n_eigenvectors) - { - // Panic if the number of eigenpairs wanted is out of bounds. 
- AssertThrow ((n_eigenvectors > 0) && (n_eigenvectors <= A.m ()), - ExcSLEPcWrappersUsageError()); - - // Set the matrices of the problem - set_matrices (A); - - // and solve - unsigned int n_converged = 0; - solve (n_eigenvectors, &n_converged); - - if (n_converged > n_eigenvectors) - n_converged = n_eigenvectors; - AssertThrow (n_converged == n_eigenvectors, - ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenvectors)); - - AssertThrow (vr.size() != 0, ExcSLEPcWrappersUsageError()); - vr.resize (n_converged, vr.front()); - kr.resize (n_converged); - - for (size_type index=0; index &eigenvalues, + std::vector &eigenvectors, - const unsigned int n_eigenpairs) ++ const size_type n_eigenpairs) + { + // Panic if the number of eigenpairs wanted is out of bounds. + AssertThrow ((n_eigenpairs > 0) && (n_eigenpairs <= A.m ()), + ExcSLEPcWrappersUsageError()); + + // Set the matrices of the problem + set_matrices (A); + + // and solve - unsigned int n_converged = 0; ++ size_type n_converged = 0; + solve (n_eigenpairs, &n_converged); + + if (n_converged > n_eigenpairs) + n_converged = n_eigenpairs; + AssertThrow (n_converged == n_eigenpairs, + ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs)); + + AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError()); + eigenvectors.resize (n_converged, eigenvectors.front()); + eigenvalues.resize (n_converged); + - for (unsigned int index=0; index - void + void SolverBase::solve (const PETScWrappers::MatrixBase &A, const PETScWrappers::MatrixBase &B, - std::vector &kr, - std::vector &vr, - const size_type n_eigenvectors) - { - // Guard against incompatible matrix sizes: - AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m())); - AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n())); + std::vector &eigenvalues, + std::vector &eigenvectors, - const unsigned int n_eigenpairs) ++ const size_type n_eigenpairs) + { + // Guard against incompatible matrix sizes: + AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m())); + AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n())); + + // Panic if the number of eigenpairs wanted is out of bounds. 
+ AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()), + ExcSLEPcWrappersUsageError()); + + // Set the matrices of the problem + set_matrices (A, B); + + // and solve - unsigned int n_converged = 0; ++ size_type n_converged = 0; + solve (n_eigenpairs, &n_converged); + + if (n_converged>=n_eigenpairs) + n_converged = n_eigenpairs; + + AssertThrow (n_converged==n_eigenpairs, + ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs)); + AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError()); + + eigenvectors.resize (n_converged, eigenvectors.front()); + eigenvalues.resize (n_converged); + - for (unsigned int index=0; index 0) && (n_eigenvectors <= A.m ()), - ExcSLEPcWrappersUsageError()); - - // Set the matrices of the problem - set_matrices (A, B); - - // and solve - unsigned int n_converged = 0; - solve (n_eigenvectors, &n_converged); - - if (n_converged >= n_eigenvectors) - n_converged = n_eigenvectors; - - AssertThrow (n_converged == n_eigenvectors, - ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenvectors)); - AssertThrow (vr.size() != 0, ExcSLEPcWrappersUsageError()); - - vr.resize (n_converged, vr.front()); - kr.resize (n_converged); + template + void + SolverBase::solve (const PETScWrappers::MatrixBase &A, + const PETScWrappers::MatrixBase &B, + std::vector &real_eigenvalues, + std::vector &imag_eigenvalues, + std::vector &real_eigenvectors, + std::vector &imag_eigenvectors, - const unsigned int n_eigenpairs) ++ const size_type n_eigenpairs) + { + // Guard against incompatible matrix sizes: + AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m())); + AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n())); + + // and incompatible eigenvalue/eigenvector sizes + AssertThrow (real_eigenvalues.size() == imag_eigenvalues.size(), + ExcDimensionMismatch(real_eigenvalues.size(), imag_eigenvalues.size())); + AssertThrow (real_eigenvectors.size() == imag_eigenvectors.n (), + ExcDimensionMismatch(real_eigenvectors.size(), imag_eigenvectors.size())); + + // Panic if the number of eigenpairs wanted is out of bounds. + AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()), + ExcSLEPcWrappersUsageError()); + + // Set the matrices of the problem + set_matrices (A, B); + + // and solve - unsigned int n_converged = 0; ++ size_type n_converged = 0; + solve (n_eigenpairs, &n_converged); + + if (n_converged>=n_eigenpairs) + n_converged = n_eigenpairs; + + AssertThrow (n_converged==n_eigenpairs, + ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs)); + AssertThrow ((real_eigenvectors.size()!=0) && (imag_eigenvectors.size()!=0), + ExcSLEPcWrappersUsageError()); + + real_eigenvectors.resize (n_converged, real_eigenvectors.front()); + imag_eigenvectors.resize (n_converged, imag_eigenvectors.front()); + real_eigenvalues.resize (n_converged); + imag_eigenvalues.resize (n_converged); + - for (unsigned int index=0; index long_val_array; -- std::vector long_index_array; ++ std::vector long_index_array; // If we don't elide zeros, the pointers are already available... 
need to @@@ -3342,14 -3319,14 +3342,14 @@@ last_action = Add; - int *col_index_ptr; + TrilinosWrappers::types::int_type *col_index_ptr; TrilinosScalar *col_value_ptr; - int n_columns; + TrilinosWrappers::types::int_type n_columns; double short_val_array[100]; -- int short_index_array[100]; ++ TrilinosWrappers::types::int_type short_index_array[100]; std::vector long_val_array; -- std::vector long_index_array; ++ std::vector long_index_array; // If we don't elide zeros, the pointers are already available... need to // cast to non-const pointers as that is the format taken by Trilinos (but @@@ -3684,14 -3643,16 +3684,16 @@@ Assert (matrix->Filled(), ExcMatrixNotCompressed()); internal::SparseMatrix::check_vector_map_equality(*matrix, src, dst); - const int dst_local_size = dst.end() - dst.begin(); - AssertDimension (dst_local_size, matrix->RangeMap().NumMyElements()); - const int src_local_size = src.end() - src.begin(); - AssertDimension (src_local_size, matrix->DomainMap().NumMyElements()); + const size_type dst_local_size = dst.end() - dst.begin(); + AssertDimension (dst_local_size, static_cast(matrix->RangeMap().NumMyElements())); + const size_type src_local_size = src.end() - src.begin(); + AssertDimension (src_local_size, static_cast(matrix->DomainMap().NumMyElements())); - Epetra_Vector tril_dst (View, matrix->RangeMap(), dst.begin()); - Epetra_Vector tril_src (View, matrix->DomainMap(), - const_cast(src.begin())); + Epetra_MultiVector tril_dst (View, matrix->RangeMap(), dst.begin(), + matrix->DomainMap().NumMyPoints(), 1); + Epetra_MultiVector tril_src (View, matrix->DomainMap(), + const_cast(src.begin()), + matrix->DomainMap().NumMyPoints(), 1); const int ierr = matrix->Multiply (false, tril_src, tril_dst); Assert (ierr == 0, ExcTrilinosError(ierr)); @@@ -3710,14 -3671,16 +3712,16 @@@ Assert (matrix->Filled(), ExcMatrixNotCompressed()); internal::SparseMatrix::check_vector_map_equality(*matrix, dst, src); - const int dst_local_size = dst.end() - dst.begin(); - AssertDimension (dst_local_size, matrix->DomainMap().NumMyElements()); - const int src_local_size = src.end() - src.begin(); - AssertDimension (src_local_size, matrix->RangeMap().NumMyElements()); + const size_type dst_local_size = dst.end() - dst.begin(); + AssertDimension (dst_local_size, static_cast(matrix->DomainMap().NumMyElements())); + const size_type src_local_size = src.end() - src.begin(); + AssertDimension (src_local_size, static_cast(matrix->RangeMap().NumMyElements())); - Epetra_Vector tril_dst (View, matrix->DomainMap(), dst.begin()); - Epetra_Vector tril_src (View, matrix->RangeMap(), - const_cast(src.begin())); + Epetra_MultiVector tril_dst (View, matrix->DomainMap(), dst.begin(), + matrix->DomainMap().NumMyPoints(), 1); + Epetra_MultiVector tril_src (View, matrix->RangeMap(), + const_cast(src.begin()), + matrix->DomainMap().NumMyPoints(), 1); const int ierr = matrix->Multiply (true, tril_src, tril_dst); Assert (ierr == 0, ExcTrilinosError(ierr)); diff --cc deal.II/include/deal.II/lac/trilinos_vector.h index 18383ff2e8,79bafae4fa..e1302dff34 --- a/deal.II/include/deal.II/lac/trilinos_vector.h +++ b/deal.II/include/deal.II/lac/trilinos_vector.h @@@ -188,11 -166,19 +188,24 @@@ namespace TrilinosWrapper class Vector : public VectorBase { public: + /** + * Declare type for container size. + */ + typedef dealii::types::global_dof_index size_type; + + /** + * A variable that indicates whether this vector + * supports distributed data storage. 
If true, then + * this vector also needs an appropriate compress() + * function that allows communicating recent set or + * add operations to individual elements to be communicated + * to other processors. + * + * For the current class, the variable equals + * true, since it does support parallel data storage. + */ + static const bool supports_distributed_data = true; + /** * @name Basic constructors and initalization. */ @@@ -598,11 -584,21 +611,26 @@@ class Vector : public VectorBase { public: + /** + * Declare type for container size. + */ + typedef dealii::types::global_dof_index size_type; + + /** + * A variable that indicates whether this vector + * supports distributed data storage. If true, then + * this vector also needs an appropriate compress() + * function that allows communicating recent set or + * add operations to individual elements to be communicated + * to other processors. + * + * For the current class, the variable equals + * false, since it does not support parallel data storage. + * If you do need parallel data storage, use + * TrilinosWrappers::MPI::Vector. + */ + static const bool supports_distributed_data = false; + /** * Default constructor that * generates an empty (zero size) diff --cc deal.II/include/deal.II/lac/trilinos_vector_base.h index aed6e15110,f8d2b168b3..0d1fcb47ca --- a/deal.II/include/deal.II/lac/trilinos_vector_base.h +++ b/deal.II/include/deal.II/lac/trilinos_vector_base.h @@@ -524,8 -519,24 +524,24 @@@ namespace TrilinosWrapper * @note The same limitation for the applicability of this * function applies as listed in the documentation of local_range(). */ - bool in_local_range (const unsigned int index) const; + bool in_local_range (const size_type index) const; + /** + * Return an index set that describes which elements of this vector + * are owned by the current processor. Note that this index set does + * not include elements this vector may store locally as ghost + * elements but that are in fact owned by another processor. + * As a consequence, the index sets returned on different + * processors if this is a distributed vector will form disjoint + * sets that add up to the complete index set. + * Obviously, if a vector is created on only one processor, then + * the result would satisfy + * @code + * vec.locally_owned_elements() == complete_index_set (vec.size()) + * @endcode + */ + IndexSet locally_owned_elements () const; + /** * Return if the vector contains ghost * elements. This answer is true if there diff --cc deal.II/include/deal.II/lac/vector.h index 0b530d0662,6c7fe3e9ce..bfd739458f --- a/deal.II/include/deal.II/lac/vector.h +++ b/deal.II/include/deal.II/lac/vector.h @@@ -585,8 -599,28 +599,28 @@@ public * Since this is not a distributed * vector the method always returns true. */ - bool in_local_range (const types::global_dof_index global_index) const; + bool in_local_range (const size_type global_index) const; + /** + * Return an index set that describes which elements of this vector + * are owned by the current processor. Note that this index set does + * not include elements this vector may store locally as ghost + * elements but that are in fact owned by another processor. + * As a consequence, the index sets returned on different + * processors if this is a distributed vector will form disjoint + * sets that add up to the complete index set. 
+ * Obviously, if a vector is created on only one processor, then + * the result would satisfy + * @code + * vec.locally_owned_elements() == complete_index_set (vec.size()) + * @endcode + * + * Since the current data type does not support parallel data storage + * across different processors, the returned index set is the + * complete index set. + */ + IndexSet locally_owned_elements () const; + /** * Return dimension of the vector. */ diff --cc deal.II/include/deal.II/lac/vector.templates.h index de1216b6cd,5cb3ba459a..5502623c73 --- a/deal.II/include/deal.II/lac/vector.templates.h +++ b/deal.II/include/deal.II/lac/vector.templates.h @@@ -337,20 -335,20 +337,20 @@@ namespace interna } template - void copy_subrange (const unsigned int begin, - const unsigned int end, + void copy_subrange (const size_type begin, + const size_type end, const dealii::Vector &src, - dealii::Vector &dst) + dealii::Vector &dst) { memcpy(&*(dst.begin()+begin), &*(src.begin()+begin), (end-begin)*sizeof(T)); } template - void copy_subrange (const unsigned int begin, - const unsigned int end, + void copy_subrange (const size_type begin, + const size_type end, const dealii::Vector &src, - dealii::Vector &dst) + dealii::Vector &dst) { const T *q = src.begin()+begin; const T *const end_q = src.begin()+end; @@@ -360,20 -358,23 +360,23 @@@ } template - void copy_subrange_wrap (const unsigned int begin, - const unsigned int end, + void copy_subrange_wrap (const size_type begin, + const size_type end, const dealii::Vector &src, - dealii::Vector &dst) + dealii::Vector &dst) { copy_subrange (begin, end, src, dst); } template void copy_vector (const dealii::Vector &src, - dealii::Vector &dst) + dealii::Vector &dst) { + if (PointerComparison::equal(&src, &dst)) + return; + - const unsigned int vec_size = src.size(); - const unsigned int dst_size = dst.size(); + const size_type vec_size = src.size(); + const size_type dst_size = dst.size(); if (dst_size != vec_size) dst.reinit (vec_size, true); if (vec_size>internal::Vector::minimum_parallel_grain_size) diff --cc deal.II/include/deal.II/multigrid/mg_transfer.h index 3dca22527c,3662b26b45..b87a922d98 --- a/deal.II/include/deal.II/multigrid/mg_transfer.h +++ b/deal.II/include/deal.II/multigrid/mg_transfer.h @@@ -218,11 -246,10 +246,10 @@@ private /** * Sizes of the multi-level vectors. */ - std::vector sizes; + std::vector sizes; /** - * Sparsity patterns for transfer - * matrices. + * Sparsity patterns for transfer matrices. */ std::vector::Sparsity> > prolongation_sparsities; @@@ -237,14 -261,37 +261,37 @@@ std::vector::Matrix> > prolongation_matrices; /** - * Mapping for the - * copy_to/from_mg-functions. - * The data is first the global - * index, then the level index. + * Mapping for the copy_to_mg() and copy_from_mg() functions. Here only + * index pairs locally owned + * + * The data is organized as follows: one vector per level. Each + * element of these vectors contains first the global index, then + * the level index. */ - std::vector > > + std::vector > > copy_indices; + /** + * Additional degrees of freedom for the copy_to_mg() + * function. These are the ones where the global degree of freedom + * is locally owned and the level degree of freedom is not. + * + * Organization of the data is like for #copy_indices_mine. + */ - std::vector > > ++ std::vector > > + copy_indices_to_me; + + /** + * Additional degrees of freedom for the copy_from_mg() + * function. 
These are the ones where the level degree of freedom + * is locally owned and the global degree of freedom is not. + * + * Organization of the data is like for #copy_indices_mine. + */ - std::vector > > ++ std::vector > > + copy_indices_from_me; + + /** * The vector that stores what * has been given to the diff --cc deal.II/include/deal.II/multigrid/mg_transfer.templates.h index be3364f996,50027ee368..fc69e3a4e0 --- a/deal.II/include/deal.II/multigrid/mg_transfer.templates.h +++ b/deal.II/include/deal.II/multigrid/mg_transfer.templates.h @@@ -193,18 -192,30 +192,30 @@@ MGTransferPrebuilt::copy_from_m // have fine level basis // functions dst = 0; - for (unsigned int level=0; level >::const_iterator IT; + typedef std::vector >::const_iterator IT; - if (constraints == 0) - for (IT i= copy_indices[level].begin(); - i != copy_indices[level].end(); ++i) - dst(i->first) = src[level](i->second); + // First copy all indices local to this process + if (constraints==0) + for (IT i= copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + dst(i->first) = src[level](i->second); else - for (IT i= copy_indices[level].begin(); - i != copy_indices[level].end(); ++i) - constraints->distribute_local_to_global(i->first, src[level](i->second), dst); + for (IT i= copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + constraints->distribute_local_to_global(i->first, src[level](i->second), dst); + + // Do the same for the indices where the level index is local, + // but the global index is not + if (constraints==0) + for (IT i= copy_indices_from_me[level].begin(); + i != copy_indices_from_me[level].end(); ++i) + dst(i->first) = src[level](i->second); + else + for (IT i= copy_indices_from_me[level].begin(); + i != copy_indices_from_me[level].end(); ++i) + constraints->distribute_local_to_global(i->first, src[level](i->second), dst); } } @@@ -225,12 -236,28 +236,28 @@@ MGTransferPrebuilt::copy_from_m // to the coarse level, but // have fine level basis // functions - for (unsigned int level=0; level >::const_iterator IT; + typedef std::vector >::const_iterator IT; - for (IT i= copy_indices[level].begin(); - i != copy_indices[level].end(); ++i) - dst(i->first) += src[level](i->second); + if (constraints==0) + for (IT i= copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + dst(i->first) += src[level](i->second); + else + for (IT i= copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + constraints->distribute_local_to_global(i->first, src[level](i->second), dst); + + // Do the same for the indices where the level index is local, + // but the global index is not + if (constraints==0) + for (IT i= copy_indices_from_me[level].begin(); + i != copy_indices_from_me[level].end(); ++i) + dst(i->first) += src[level](i->second); + else + for (IT i= copy_indices_from_me[level].begin(); + i != copy_indices_from_me[level].end(); ++i) + constraints->distribute_local_to_global(i->first, src[level](i->second), dst); } } diff --cc deal.II/include/deal.II/numerics/vector_tools.templates.h index 575a416a2e,e598adf066..6e90a698fa --- a/deal.II/include/deal.II/numerics/vector_tools.templates.h +++ b/deal.II/include/deal.II/numerics/vector_tools.templates.h @@@ -610,7 -586,7 +586,7 @@@ namespace VectorTool // check if constraints are compatible (see below) bool constraints_are_compatible = true; { -- for (std::map::iterator it=boundary_values.begin(); ++ for (std::map::iterator it=boundary_values.begin(); it != boundary_values.end(); ++it) if 
(constraints.is_constrained(it->first)) if (!(constraints.get_constraint_entries(it->first)->size() > 0 diff --cc deal.II/source/dofs/dof_tools.cc index 48167c84d8,dab901128c..7f184c6068 --- a/deal.II/source/dofs/dof_tools.cc +++ b/deal.II/source/dofs/dof_tools.cc @@@ -2786,7 -2775,147 +2775,147 @@@ namespace DoFTool - template + namespace + { + // enter constraints for periodicity into the given ConstraintMatrix object. + // this function is called when at least one of the two face iterators corresponds + // to an active object without further children + // + // @param transformation A matrix that maps degrees of freedom from one face + // to another. If the DoFs on the two faces are supposed to match exactly, then + // the matrix so provided will be the identity matrix. if face 2 is once refined + // from face 1, then the matrix needs to be the interpolation matrix from a face + // to this particular child + // + // @precondition: face_1 is supposed to be active + template + void + set_periodicity_constraints (const FaceIterator &face_1, + const typename identity::type &face_2, + const FullMatrix &transformation, + dealii::ConstraintMatrix &constraint_matrix, + const ComponentMask &component_mask, + const bool face_orientation, + const bool face_flip, + const bool face_rotation) + { + static const int dim = FaceIterator::AccessorType::dimension; + static const int spacedim = FaceIterator::AccessorType::space_dimension; + + // we should be in the case where face_1 is active, i.e. has no children: + Assert (!face_1->has_children(), + ExcInternalError()); + + Assert (face_1->n_active_fe_indices() == 1, + ExcInternalError()); + + // if face_2 does have children, then we need to iterate over them + if (face_2->has_children()) + { + Assert (face_2->n_children() == GeometryInfo::max_children_per_face, + ExcNotImplemented()); + const unsigned int dofs_per_face + = face_1->get_fe(face_1->nth_active_fe_index(0)).dofs_per_face; + FullMatrix child_transformation (dofs_per_face, dofs_per_face); + FullMatrix subface_interpolation (dofs_per_face, dofs_per_face); + for (unsigned int c=0; cn_children(); ++c) + { + // get the interpolation matrix recursively from the one that + // interpolated from face_1 to face_2 by multiplying from the + // left with the one that interpolates from face_2 to + // its child + face_1->get_fe(face_1->nth_active_fe_index(0)) + .get_subface_interpolation_matrix (face_1->get_fe(face_1->nth_active_fe_index(0)), + c, + subface_interpolation); + subface_interpolation.mmult (child_transformation, transformation); + set_periodicity_constraints(face_1, face_2->child(c), + child_transformation, + constraint_matrix, component_mask, + face_orientation, face_flip, face_rotation); + } + } + else + // both faces are active. 
we need to match the corresponding DoFs of both faces + { + const unsigned int face_1_index = face_1->nth_active_fe_index(0); + const unsigned int face_2_index = face_2->nth_active_fe_index(0); + Assert(face_1->get_fe(face_1_index) == face_2->get_fe(face_1_index), + ExcMessage ("Matching periodic cells need to use the same finite element")); + + const FiniteElement &fe = face_1->get_fe(face_1_index); + + Assert(component_mask.represents_n_components(fe.n_components()), + ExcMessage ("The number of components in the mask has to be either " + "zero or equal to the number of components in the finite " "element.")); + + const unsigned int dofs_per_face = fe.dofs_per_face; + - std::vector dofs_1(dofs_per_face); - std::vector dofs_2(dofs_per_face); ++ std::vector dofs_1(dofs_per_face); ++ std::vector dofs_2(dofs_per_face); + + face_1->get_dof_indices(dofs_1, face_1_index); + face_2->get_dof_indices(dofs_2, face_2_index); + + // Well, this is a hack: + // + // There is no + // face_to_face_index(face_index, + // face_orientation, + // face_flip, + // face_rotation) + // function in FiniteElementData, so we have to use + // face_to_cell_index(face_index, face + // face_orientation, + // face_flip, + // face_rotation) + // But this will give us an index on a cell - something we cannot work + // with directly. But luckily we can match them back :-] + + std::map cell_to_rotated_face_index; + + // Build up a cell to face index for face_2: + for (unsigned int i = 0; i < dofs_per_face; ++i) + { + const unsigned int cell_index = fe.face_to_cell_index(i, 0, /* It doesn't really matter, just assume + * we're on the first face... + */ + true, false, false // default orientation + ); + cell_to_rotated_face_index[cell_index] = i; + } + + // loop over all dofs on face 2 and constrain them again the ones on face 1 + for (unsigned int i=0; i void make_periodicity_constraints (const FaceIterator &face_1, const typename identity::type &face_2, @@@ -5167,7 -5221,7 +5227,7 @@@ Assert (col_entry != weights[first_used_row].end(), ExcInternalError()); if ((col_entry->second == 1) && -- (representants[first_used_row] == static_cast(global_dof))) ++ (representants[first_used_row] == global_dof)) // dof unconstrained or constrained to itself (in case this // cell is mapped to itself, rather than to children of // itself) diff --cc deal.II/source/lac/chunk_sparsity_pattern.cc index 498568f4b1,5bcb7c0470..697360a1b8 --- a/deal.II/source/lac/chunk_sparsity_pattern.cc +++ b/deal.II/source/lac/chunk_sparsity_pattern.cc @@@ -727,7 -722,7 +727,7 @@@ ChunkSparsityPattern::memory_consumptio // explicit instantiations template void ChunkSparsityPattern::copy_from (const SparsityPattern &, -- const unsigned int, ++ const size_type, const bool); template void ChunkSparsityPattern::copy_from (const CompressedSparsityPattern &, diff --cc deal.II/source/lac/slepc_solver.cc index ae7088a473,4640a0e50c..638f7b0f9d --- a/deal.II/source/lac/slepc_solver.cc +++ b/deal.II/source/lac/slepc_solver.cc @@@ -109,8 -109,8 +109,8 @@@ namespace SLEPcWrapper } void - SolverBase::solve (const size_type n_eigenvectors, - SolverBase::solve (const unsigned int n_eigenpairs, - unsigned int *n_converged) ++ SolverBase::solve (const size_type n_eigenpairs, + size_type *n_converged) { int ierr; @@@ -186,11 -186,10 +186,10 @@@ // get number of converged eigenstates ierr = EPSGetConverged (solver_data->eps, - reinterpret_cast(n_converged) - ); + reinterpret_cast(n_converged)); AssertThrow (ierr == 0, ExcSLEPcError(ierr)); - int n_iterations = 0; + PetscInt 
n_iterations = 0; double residual_norm = 1e300; // @todo Investigate elaborating on some of this to act on the @@@ -219,9 -218,9 +218,9 @@@ } void - SolverBase::get_eigenpair (const unsigned int index, + SolverBase::get_eigenpair (const size_type index, - double &kr, - PETScWrappers::VectorBase &vr) + double &eigenvalues, + PETScWrappers::VectorBase &eigenvectors) { AssertThrow (solver_data.get() != 0, ExcSLEPcWrappersUsageError()); diff --cc deal.II/source/multigrid/mg_transfer_prebuilt.cc index 4c7031eac3,871cb0f37f..94b5cf7b32 --- a/deal.II/source/multigrid/mg_transfer_prebuilt.cc +++ b/deal.II/source/multigrid/mg_transfer_prebuilt.cc @@@ -139,10 -141,13 +141,13 @@@ void MGTransferPrebuilt::build_ // element will be stored CompressedSimpleSparsityPattern csp (sizes[level+1], sizes[level]); - std::vector entries (dofs_per_cell); + std::vector entries (dofs_per_cell); for (typename DoFHandler::cell_iterator cell=mg_dof.begin(level); cell != mg_dof.end(level); ++cell) - if (cell->has_children()) + if (cell->has_children() && + ( mg_dof.get_tria().locally_owned_subdomain()==numbers::invalid_subdomain_id + || cell->level_subdomain_id()==mg_dof.get_tria().locally_owned_subdomain() + )) { cell->get_mg_dof_indices (dof_indices_parent); @@@ -210,67 -229,35 +229,35 @@@ true); } } + prolongation_matrices[level]->compress(VectorOperation::insert); } - - // impose boundary conditions - // but only in the column of - // the prolongation matrix - if (mg_constrained_dofs != 0) - if (mg_constrained_dofs->set_boundary_values()) - { - std::vector constrain_indices; - for (int level=n_levels-2; level>=0; --level) - { - if (mg_constrained_dofs->get_boundary_indices()[level].size() == 0) - continue; - - // need to delete all the columns in the - // matrix that are on the boundary. to achieve - // this, create an array as long as there are - // matrix columns, and find which columns we - // need to filter away. - constrain_indices.resize (0); - constrain_indices.resize (prolongation_matrices[level]->n(), 0); - std::set::const_iterator dof - = mg_constrained_dofs->get_boundary_indices()[level].begin(), - endd = mg_constrained_dofs->get_boundary_indices()[level].end(); - for (; dof != endd; ++dof) - constrain_indices[*dof] = 1; - - const types::global_dof_index n_dofs = prolongation_matrices[level]->m(); - for (types::global_dof_index i=0; i::Matrix::iterator - start_row = prolongation_matrices[level]->begin(i), - end_row = prolongation_matrices[level]->end(i); - for (; start_row != end_row; ++start_row) - { - if (constrain_indices[start_row->column()] == 1) - start_row->value() = 0; - } - } - } - } - - // to find the indices that describe the - // relation between global dofs and local - // numbering on the individual level, first - // create a temp vector where the ith level - // entry contains the respective global - // entry. this gives a neat way to find those - // indices. in a second step, actually build - // the std::vector > that - // only contains the active dofs on the - // levels. + // Now we are filling the variables copy_indices*, which are essentially + // maps from global to mg dof for each level stored as a std::vector of + // pairs. We need to split this map on each level depending on the ownership + // of the global and mg dof, so that we do not later access non-local elements + // in copy_to/from_mg. + // Here we keep track in the bitfield dof_touched which global dof has + // been processed already (otherwise we would get duplicates on each level + // and on different levels).
Note that it is important that we iterate + // the levels starting from 0, so that mg dofs on coarser levels "win". copy_indices.resize(n_levels); - std::vector temp_copy_indices; + copy_indices_from_me.resize(n_levels); + copy_indices_to_me.resize(n_levels); + IndexSet globally_relevant; + DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant); + std::vector dof_touched(globally_relevant.n_elements(), false); + - std::vector global_dof_indices (dofs_per_cell); - std::vector level_dof_indices (dofs_per_cell); + std::vector global_dof_indices (dofs_per_cell); + std::vector level_dof_indices (dofs_per_cell); - for (int level=mg_dof.get_tria().n_levels()-1; level>=0; --level) + // for (int level=mg_dof.get_tria().n_levels()-1; level>=0; --level) + for (unsigned int level=0; level::active_cell_iterator level_cell = mg_dof.begin_active(level); const typename DoFHandler::active_cell_iterator @@@ -292,34 -276,47 +276,47 @@@ for (unsigned int i=0; iat_refinement_edge(level,level_dof_indices[i])) - temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; - } + // we need to ignore if the DoF is on a refinement edge (hanging node) + if (mg_constrained_dofs != 0 + && mg_constrained_dofs->at_refinement_edge(level, level_dof_indices[i])) + continue; + + unsigned int global_idx = globally_relevant.index_within_set(global_dof_indices[i]); + // skip if we did this global dof already (on this or a coarser level) + if (dof_touched[global_idx]) + continue; + + bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]); + bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]); + + if (global_mine && level_mine) + copy_indices[level].push_back( + std::pair (global_dof_indices[i], level_dof_indices[i])); + else if (level_mine) + copy_indices_from_me[level].push_back( + std::pair (global_dof_indices[i], level_dof_indices[i])); + else if (global_mine) + copy_indices_to_me[level].push_back( + std::pair (global_dof_indices[i], level_dof_indices[i])); else - temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; + continue; + + dof_touched[global_idx] = true; } } - - // now all the active dofs got a valid entry, - // the other ones have an invalid entry. Count - // the invalid entries and then resize the - // copy_indices object. Then, insert the pairs - // of global index and level index into - // copy_indices. - const types::global_dof_index n_active_dofs = - std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(), - std::bind2nd(std::not_equal_to(), - numbers::invalid_dof_index)); - copy_indices[level].resize (n_active_dofs); - types::global_dof_index counter = 0; - for (types::global_dof_index i=0; i (temp_copy_indices[i], i); - Assert (counter == n_active_dofs, ExcInternalError()); } + + // If we are in debugging mode, we order the copy indices, so we get + // more reliable output for regression tests + #ifdef DEBUG - std::less > compare; ++ std::less > compare; + for (unsigned int level=0;level
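
The set_periodicity_constraints() addition in the dof_tools.cc hunk above rests on one idea: if face_2 is refined, the face-to-face transformation is composed recursively by multiplying the subface interpolation matrix of each child onto the transformation that was valid for the parent. The following is a minimal stand-alone sketch of that composition pattern only; the Face and Matrix types here are hypothetical stand-ins, not the deal.II classes.

@code
#include <cstddef>
#include <iostream>
#include <vector>

// Toy stand-ins for the face iterator and FullMatrix used above; the "matrix"
// is a plain scalar factor so that the composition is easy to follow.
struct Matrix
{
  double a;
  Matrix mmult (const Matrix &b) const { return Matrix{a * b.a}; }
};

struct Face
{
  std::vector<Face> children;               // empty means the face is active
  bool has_children () const { return !children.empty(); }
};

// Recurse until both faces are active. At every refinement step the matrix that
// interpolates from face_2 to its child c is multiplied onto the transformation
// accumulated so far -- the same role subface_interpolation.mmult() plays above.
void constrain_periodic (const Face &face_1, const Face &face_2,
                         const Matrix &transformation)
{
  if (face_2.has_children())
    for (std::size_t c = 0; c < face_2.children.size(); ++c)
      {
        const Matrix subface_interpolation{0.5};   // would come from the element
        const Matrix child_transformation = subface_interpolation.mmult(transformation);
        constrain_periodic(face_1, face_2.children[c], child_transformation);
      }
  else
    std::cout << "constrain active pair, factor " << transformation.a << '\n';
}

int main ()
{
  Face face_1;                               // active
  Face face_2;                               // once refined into two children
  face_2.children.resize(2);
  constrain_periodic(face_1, face_2, Matrix{1.0});   // start from the identity
}
@endcode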
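
The "hack" described in the comments of that same hunk -- there is no face_to_face_index() that takes the orientation flags, so face DoF indices are routed through face_to_cell_index() and then matched back -- amounts to inverting an injective map once and looking it up afterwards. A small self-contained illustration follows; face_to_cell_index() here is a made-up stand-in, not the FiniteElementData member.

@code
#include <cassert>
#include <map>

// Hypothetical stand-in for the real face_to_cell_index(); any injective map
// from face DoF index to cell DoF index works for the illustration.
unsigned int face_to_cell_index (const unsigned int face_dof,
                                 const unsigned int /*face*/,
                                 const bool /*orientation*/,
                                 const bool /*flip*/,
                                 const bool /*rotation*/)
{
  return 10 + face_dof;
}

int main ()
{
  const unsigned int dofs_per_face = 4;

  // Invert the face->cell map once for the default orientation, just like the
  // cell_to_rotated_face_index map built in the hunk above.
  std::map<unsigned int, unsigned int> cell_to_rotated_face_index;
  for (unsigned int i = 0; i < dofs_per_face; ++i)
    cell_to_rotated_face_index[face_to_cell_index(i, 0, true, false, false)] = i;

  // A cell index obtained for some (possibly rotated) face can now be matched
  // back to the corresponding index within the face.
  const unsigned int cell_index = face_to_cell_index(2, 0, true, false, false);
  assert(cell_to_rotated_face_index[cell_index] == 2);
  return 0;
}
@endcode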
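
The slepc_solver.cc hunk switches the local n_iterations to PetscInt because the SLEPc query routines write into PetscInt, which is wider than int when PETSc is configured with 64-bit indices. As a hedged sketch of that point only (not the deal.II wrapper itself), one can keep PetscInt locals and convert at the boundary instead of casting pointers; eps is assumed to be an already solved EPS object, and error checking is reduced to a placeholder.

@code
// Sketch only: assumes SLEPc is available and eps has already been solved.
#include <slepceps.h>
#include <cstddef>

void query_solver_state (EPS eps, std::size_t &n_converged, std::size_t &n_iterations)
{
  // PetscInt locals match what EPSGetConverged/EPSGetIterationNumber write,
  // independent of whether PETSc uses 32- or 64-bit indices.
  PetscInt nconv = 0;
  PetscInt its   = 0;

  PetscErrorCode ierr = EPSGetConverged (eps, &nconv);
  (void)ierr;                               // real code would check ierr == 0
  ierr = EPSGetIterationNumber (eps, &its);
  (void)ierr;

  // Convert explicitly at the interface instead of reinterpret_cast'ing pointers.
  n_converged  = static_cast<std::size_t>(nconv);
  n_iterations = static_cast<std::size_t>(its);
}
@endcode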
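
Finally, the mg_transfer_prebuilt.cc hunk splits each (global dof, level dof) pair into copy_indices, copy_indices_from_me and copy_indices_to_me according to who owns the two indices, and marks the global dof in dof_touched so that the coarser level, visited first, wins. The decision logic in isolation, with hypothetical ownership predicates in place of the IndexSet queries and the global dof used directly as the dof_touched index:

@code
#include <cstdint>
#include <utility>
#include <vector>

typedef std::uint64_t dof_index;
typedef std::pair<dof_index, dof_index> dof_pair;   // (global dof, level dof)

// Hypothetical ownership tests; in deal.II these are IndexSet::is_element()
// calls on locally_owned_dofs() and locally_owned_mg_dofs(level).
bool global_is_mine (const dof_index) { return true; }
bool level_is_mine  (const dof_index) { return true; }

// Classify one pair exactly as the loop above does: both owned here, owned only
// on the level (to be sent), or owned only globally (to be received). Pairs
// owned by neither side are skipped, and each global dof is handled only once.
void classify (const dof_index global_dof, const dof_index level_dof,
               std::vector<bool> &dof_touched,
               std::vector<dof_pair> &copy_indices,
               std::vector<dof_pair> &copy_indices_from_me,
               std::vector<dof_pair> &copy_indices_to_me)
{
  if (dof_touched[global_dof])               // already handled on a coarser level
    return;

  const bool global_mine = global_is_mine(global_dof);
  const bool level_mine  = level_is_mine(level_dof);

  if (global_mine && level_mine)
    copy_indices.push_back(dof_pair(global_dof, level_dof));
  else if (level_mine)
    copy_indices_from_me.push_back(dof_pair(global_dof, level_dof));
  else if (global_mine)
    copy_indices_to_me.push_back(dof_pair(global_dof, level_dof));
  else
    return;

  dof_touched[global_dof] = true;
}

int main ()
{
  std::vector<bool> dof_touched(100, false);
  std::vector<dof_pair> mine, from_me, to_me;
  classify(42, 7, dof_touched, mine, from_me, to_me);   // lands in 'mine' with the stubs
  return 0;
}
@endcode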