face_system_to_base_index (const unsigned int index) const;
/**
- * Given a base element number,
- * return the first block of a
- * BlockVector it would generate.
+ * Given a base element number, return the first block of a BlockVector it
+ * would generate.
*/
- unsigned int first_block_of_base (const unsigned int b) const;
+ types::global_dof_index first_block_of_base (const unsigned int b) const;
/**
- * For each vector component,
- * return which base
- * element implements this
- * component and which vector
- * component in this base element
- * this is. This information is
- * only of interest for
- * vector-valued finite elements
- * which are composed of several
- * sub-elements. In that case,
- * one may want to obtain
- * information about the element
- * implementing a certain vector
- * component, which can be done
- * using this function and the
- * FESystem::base_element()
- * function.
+ * For each vector component, return which base element implements this
+ * component and which vector component in this base element this is. This
+ * information is only of interest for vector-valued finite elements which
+ * are composed of several sub-elements. In that case, one may want to
+ * obtain information about the element implementing a certain vector
+ * component, which can be done using this function and the
+ * FESystem::base_element() function.
*
- * If this is a scalar finite
- * element, then the return value
- * is always equal to a pair of
- * zeros.
+ * If this is a scalar finite element, then the return value is always equal
+ * to a pair of zeros.
*/
std::pair<unsigned int, unsigned int>
component_to_base_index (const unsigned int component) const;
block_to_base_index (const unsigned int block) const;
/**
- * The vector block and the index
- * inside the block for this
- * shape function.
+ * The vector block and the index inside the block for this shape function.
*/
- std::pair<unsigned int,unsigned int>
+ std::pair<unsigned int,types::global_dof_index>
system_to_block_index (const unsigned int component) const;
/**
* is the sum of the dimensions of all
* components.
*/
- unsigned int size () const;
+ std::size_t size () const;
+ /**
+ * Return an index set that describes which elements of this vector
+ * are owned by the current processor. Note that this index set does
+ * not include elements this vector may store locally as ghost
+ * elements but that are in fact owned by another processor.
+      * As a consequence, if this is a distributed vector, the index
+      * sets returned on different processors will form disjoint sets
+      * that add up to the complete index set.
+ * Obviously, if a vector is created on only one processor, then
+ * the result would satisfy
+ * @code
+ * vec.locally_owned_elements() == complete_index_set (vec.size())
+ * @endcode
+ *
+ * For block vectors, this function returns the union of the
+ * locally owned elements of the individual blocks, shifted by
+ * their respective index offsets.
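+      *
+      * As a small illustration of this shift (a sketch with hypothetical
+      * sizes, not part of the interface): for a serial block vector with
+      * two blocks of sizes 3 and 2, the result is the complete index set,
+      * @code
+      * BlockVector<double> v (2);
+      * v.block(0).reinit (3);
+      * v.block(1).reinit (2);
+      * v.collect_sizes ();
+      * // v.locally_owned_elements() == complete_index_set(5)
+      * @endcode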
+ */
+ IndexSet locally_owned_elements () const;
+
/**
* Return an iterator pointing to
* the first element.
const unsigned int end_row,
const number *values,
const std::size_t *rowstart,
-- const unsigned int *colnums,
++ const size_type *colnums,
const InVector &src,
OutVector &dst)
{
-- const unsigned int m = cols.n_rows();
-- const unsigned int n = cols.n_cols();
-- const unsigned int chunk_size = cols.get_chunk_size();
++ const size_type m = cols.n_rows();
++ const size_type n = cols.n_cols();
++ const size_type chunk_size = cols.get_chunk_size();
// loop over all chunks. note that we need to treat the last chunk row
// and column differently if they have padding elements
-- const unsigned int n_filled_last_rows = m % chunk_size;
-- const unsigned int n_filled_last_cols = n % chunk_size;
++ const size_type n_filled_last_rows = m % chunk_size;
++ const size_type n_filled_last_cols = n % chunk_size;
-- const unsigned int last_regular_row = n_filled_last_rows > 0 ?
-- std::min(m/chunk_size, end_row) : end_row;
-- const unsigned int irregular_col = n/chunk_size;
++ const size_type last_regular_row = n_filled_last_rows > 0 ?
++ std::min(m/chunk_size,
++ static_cast<size_type>(end_row)) :
++ end_row;
++ const size_type irregular_col = n/chunk_size;
typename OutVector::iterator dst_ptr = dst.begin()+chunk_size*begin_row;
const number *val_ptr= &values[rowstart[begin_row]*chunk_size*chunk_size];
-- const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]];
++ const size_type *colnum_ptr = &colnums[rowstart[begin_row]];
for (unsigned int chunk_row=begin_row; chunk_row<last_regular_row;
++chunk_row)
{
dst_ptr);
else
// we're at a chunk column that has padding
-- for (unsigned int r=0; r<chunk_size; ++r)
-- for (unsigned int c=0; c<n_filled_last_cols; ++c)
++ for (size_type r=0; r<chunk_size; ++r)
++ for (size_type c=0; c<n_filled_last_cols; ++c)
dst_ptr[r] += (val_ptr[r*chunk_size + c] *
src(*colnum_ptr * chunk_size + c));
// now deal with last chunk row if necessary
if (n_filled_last_rows > 0 && end_row == (m/chunk_size+1))
{
-- const unsigned int chunk_row = last_regular_row;
++ const size_type chunk_row = last_regular_row;
const number *const val_end_of_row = &values[rowstart[chunk_row+1] *
chunk_size * chunk_size];
if (*colnum_ptr != irregular_col)
{
// we're at a chunk row but not column that has padding
-- for (unsigned int r=0; r<n_filled_last_rows; ++r)
-- for (unsigned int c=0; c<chunk_size; ++c)
++ for (size_type r=0; r<n_filled_last_rows; ++r)
++ for (size_type c=0; c<chunk_size; ++c)
dst_ptr[r]
+= (val_ptr[r*chunk_size + c] *
src(*colnum_ptr * chunk_size + c));
}
else
// we're at a chunk row and column that has padding
-- for (unsigned int r=0; r<n_filled_last_rows; ++r)
-- for (unsigned int c=0; c<n_filled_last_cols; ++c)
++ for (size_type r=0; r<n_filled_last_rows; ++r)
++ for (size_type c=0; c<n_filled_last_cols; ++c)
dst_ptr[r]
+= (val_ptr[r*chunk_size + c] *
src(*colnum_ptr * chunk_size + c));
vec(i) = 0;
}
- void set_zero_parallel(const dealii::ConstraintMatrix &cm, parallel::distributed::Vector<Number> &vec, unsigned int shift = 0)
+ // TODO: in general we should iterate over the constraints and not over all DoFs
+ // for performance reasons
+ template<typename Number>
++ void set_zero_parallel(const dealii::ConstraintMatrix &cm, parallel::distributed::Vector<Number> &vec, size_type shift = 0)
+ {
+ for (unsigned int i=0; i<vec.local_size(); ++i)
+ if (cm.is_constrained (shift + vec.local_range().first+i))
+ vec.local_element(i) = 0;
+ vec.zero_out_ghosts();
+ }
+
template<class VEC>
void set_zero_in_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, internal::bool2type<false>)
{
typedef typename BaseClass::const_iterator const_iterator;
/**
- * Constructor. There are three
- * ways to use this
- * constructor. First, without
- * any arguments, it generates
- * an object with no
- * blocks. Given one argument,
- * it initializes <tt>num_blocks</tt>
- * blocks, but these blocks have
- * size zero. The third variant
- * finally initializes all
- * blocks to the same size
- * <tt>block_size</tt>.
+ * Constructor. There are three ways to use this constructor. First,
+ * without any arguments, it generates an object with no blocks. Given
+ * one argument, it initializes <tt>num_blocks</tt> blocks, but these
+ * blocks have size zero. The third variant finally initializes all
+ * blocks to the same size <tt>block_size</tt>.
*
- * Confer the other constructor
- * further down if you intend to
- * use blocks of different
- * sizes.
+       * See the other constructor further down if you intend to use
+       * blocks of different sizes.
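+       *
+       * For example (an illustrative sketch): <tt>BlockVector<double> v (3, 10);</tt>
+       * creates a vector of three blocks with ten elements each.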
*/
- explicit BlockVector (const unsigned int num_blocks = 0,
- const unsigned int block_size = 0);
+ explicit BlockVector (const size_type num_blocks = 0,
+ const size_type block_size = 0);
/**
- * Copy-Constructor. Dimension set to
- * that of V, all components are copied
- * from V
+       * Copy constructor. The dimension is set to that of V; all
+       * components are copied from V.
*/
BlockVector (const BlockVector<Number> &V);
#endif
/**
- * Constructor. Set the number of
- * blocks to
- * <tt>block_sizes.size()</tt> and
- * initialize each block with
- * <tt>block_sizes[i]</tt> zero
- * elements.
+ * Constructor. Set the number of blocks to <tt>block_sizes.size()</tt>
+ * and initialize each block with <tt>block_sizes[i]</tt> zero elements.
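+       *
+       * For example (a sketch): passing a vector holding the two values 3
+       * and 5 yields a block vector with one block of three and one block
+       * of five zero-initialized elements.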
*/
- BlockVector (const std::vector<unsigned int> &block_sizes);
+ BlockVector (const std::vector<size_type> &block_sizes);
/**
- * Destructor. Clears memory
+ * Destructor. Clears memory.
*/
~BlockVector ();
operator= (const Vector<Number> &V);
/**
- * Reinitialize the BlockVector to
- * contain <tt>num_blocks</tt> blocks of
+ * Reinitialize the BlockVector to contain <tt>num_blocks</tt> blocks of
* size <tt>block_size</tt> each.
*
- * If the second argument is left
- * at its default value, then the
- * block vector allocates the
- * specified number of blocks but
- * leaves them at zero size. You
- * then need to later
- * reinitialize the individual
- * blocks, and call
- * collect_sizes() to update the
- * block system's knowledge of
+ * If the second argument is left at its default value, then the block
+ * vector allocates the specified number of blocks but leaves them at
+ * zero size. You then need to later reinitialize the individual blocks,
+ * and call collect_sizes() to update the block system's knowledge of
* its individual block's sizes.
*
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
+ * If <tt>fast==false</tt>, the vector is filled with zeros.
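+       *
+       * A sketch of the two-step pattern described above (the sizes are
+       * hypothetical):
+       * @code
+       * BlockVector<double> v;
+       * v.reinit (2);           // two blocks, both still of size zero
+       * v.block(0).reinit (10);
+       * v.block(1).reinit (20);
+       * v.collect_sizes ();     // update the cached block sizes
+       * @endcode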
*/
- void reinit (const unsigned int num_blocks,
- const unsigned int block_size = 0,
+ void reinit (const size_type num_blocks,
+ const size_type block_size = 0,
const bool fast = false);
/**
- * Reinitialize the BlockVector such that
- * it contains
- * <tt>block_sizes.size()</tt>
- * blocks. Each block is reinitialized to
+ * Reinitialize the BlockVector such that it contains
+ * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
* dimension <tt>block_sizes[i]</tt>.
*
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
+ * If the number of blocks is the same as before this function was
+ * called, all vectors remain the same and reinit() is called for each
+ * vector.
*
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
+ * If <tt>fast==false</tt>, the vector is filled with zeros.
*
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
+ * Note that you must call this (or the other reinit() functions)
+ * function, rather than calling the reinit() functions of an individual
+ * block, to allow the block vector to update its caches of vector
+ * sizes. If you call reinit() on one of the blocks, then subsequent
+ * actions on this object may yield unpredictable results since they may
+ * be routed to the wrong block.
*/
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
+ void reinit (const std::vector<size_type> &N,
+ const bool fast=false);
/**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
+ * Change the dimension to that of the vector <tt>V</tt>. The same
+ * applies as for the other reinit() function.
*
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
+ * The elements of <tt>V</tt> are not copied, i.e. this function is the
+ * same as calling <tt>reinit (V.size(), fast)</tt>.
*
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() of one of the
- * blocks, then subsequent
- * actions of this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
+ * Note that you must call this (or the other reinit() functions)
+ * function, rather than calling the reinit() functions of an individual
+ * block, to allow the block vector to update its caches of vector
+ * sizes. If you call reinit() of one of the blocks, then subsequent
+ * actions of this object may yield unpredictable results since they may
+ * be routed to the wrong block.
*/
template <typename Number2>
void reinit (const BlockVector<Number2> &V,
typedef const value_type *const_iterator;
typedef value_type &reference;
typedef const value_type &const_reference;
- typedef size_t size_type;
+ typedef types::global_dof_index size_type;
typedef typename numbers::NumberTraits<Number>::real_type real_type;
+ /**
+ * A variable that indicates whether this vector
+ * supports distributed data storage. If true, then
+ * this vector also needs an appropriate compress()
+     * function that allows recent set or add operations to individual
+     * elements to be communicated
+ * to other processors.
+ *
+ * For the current class, the variable equals
+ * true, since it does support parallel data storage.
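+     *
+     * As an illustration (a sketch, not part of the interface), generic
+     * code can use this variable to decide at compile time whether an
+     * assembly loop needs to be followed by communication:
+     * @code
+     * template <class VectorType>
+     * void finish_assembly (VectorType &v)
+     * {
+     *   if (VectorType::supports_distributed_data)
+     *     v.compress (VectorOperation::add);
+     * }
+     * @endcode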
+ */
+ static const bool supports_distributed_data = true;
+
/**
* @name 1: Basic Object-handling
*/
Vector (const Vector<Number> &in_vector);
/**
- * Constructs a parallel vector of the given
- * global size without any actual parallel
- * distribution.
+ * Constructs a parallel vector of the given global size without any
+ * actual parallel distribution.
*/
- Vector (const unsigned int size);
+ Vector (const size_type size);
/**
- * Constructs a parallel vector. The local
- * range is specified by @p locally_owned_set
- * (note that this must be a contiguous
- * interval, multiple intervals are not
- * possible). The IndexSet @p ghost_indices
- * specifies ghost indices, i.e., indices
- * which one might need to read data from or
- * accumulate data from. It is allowed that
- * the set of ghost indices also contains the
- * local range, but it does not need to.
+ * Constructs a parallel vector. The local range is specified by @p
+ * locally_owned_set (note that this must be a contiguous interval,
+ * multiple intervals are not possible). The IndexSet @p ghost_indices
+ * specifies ghost indices, i.e., indices which one might need to read
+ * data from or accumulate data from. It is allowed that the set of
+ * ghost indices also contains the local range, but it does not need to.
*
- * This function involves global
- * communication, so it should only be called
- * once for a given layout. Use the
- * constructor with Vector<Number> argument to
- * create additional vectors with the same
+ * This function involves global communication, so it should only be
+ * called once for a given layout. Use the constructor with
+ * Vector<Number> argument to create additional vectors with the same
* parallel layout.
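+       *
+       * A sketch of typical use (the communicator, ranges, and variable
+       * names are hypothetical):
+       * @code
+       * IndexSet owned (100), ghosts (100);
+       * owned.add_range (50*my_rank, 50*(my_rank+1)); // contiguous local range
+       * ghosts.add_index (0);   // additionally read element 0
+       * parallel::distributed::Vector<double> v (owned, ghosts, MPI_COMM_WORLD);
+       * @endcode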
*/
Vector (const IndexSet &local_range,
~Vector ();
/**
- * Sets the global size of the vector to @p
- * size without any actual parallel
- * distribution.
+ * Sets the global size of the vector to @p size without any actual
+ * parallel distribution.
*/
- void reinit (const unsigned int size,
- const bool fast = false);
+ void reinit (const size_type size,
+ const bool fast = false);
/**
- * Uses the parallel layout of the input
- * vector @p in_vector and allocates memory
- * for this vector. Recommended initialization
- * function when several vectors with the same
- * layout should be created.
+ * Uses the parallel layout of the input vector @p in_vector and
+ * allocates memory for this vector. Recommended initialization function
+ * when several vectors with the same layout should be created.
*
- * If the flag @p fast is set to false, the
- * memory will be initialized with zero,
- * otherwise the memory will be untouched (and
- * the user must make sure to fill it with
- * reasonable data before using it).
+ * If the flag @p fast is set to false, the memory will be initialized
+ * with zero, otherwise the memory will be untouched (and the user must
+ * make sure to fill it with reasonable data before using it).
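+       *
+       * Typical use (a sketch): <tt>v2.reinit (v1, true);</tt> gives
+       * <tt>v2</tt> the same parallel layout as <tt>v1</tt> without
+       * initializing the entries.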
*/
template <typename Number2>
void reinit(const Vector<Number2> &in_vector,
real_type linfty_norm () const;
/**
- * Returns the global size of the vector,
- * equal to the sum of the number of locally
- * owned indices among all the processors.
+ * Returns the global size of the vector, equal to the sum of the number
+ * of locally owned indices among all the processors.
*/
- types::global_dof_index size () const;
+ size_type size () const;
/**
- * Returns the local size of the vector, i.e.,
- * the number of indices owned locally.
+ * Returns the local size of the vector, i.e., the number of indices
+ * owned locally.
*/
- unsigned int local_size() const;
+ size_type local_size() const;
/**
- * Returns the half-open interval that
- * specifies the locally owned range of the
- * vector. Note that <code>local_size() ==
- * local_range().second -
+ * Returns the half-open interval that specifies the locally owned range
+ * of the vector. Note that <code>local_size() == local_range().second -
* local_range().first</code>.
*/
- std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
+ std::pair<size_type, size_type> local_range () const;
/**
- * Returns true if the given global index is
- * in the local range of this processor.
+ * Returns true if the given global index is in the local range of this
+ * processor.
*/
- bool in_local_range (const types::global_dof_index global_index) const;
+ bool in_local_range (const size_type global_index) const;
/**
- * Returns the number of ghost elements
- * present on the vector.
+ * Return an index set that describes which elements of this vector
+ * are owned by the current processor. Note that this index set does
+ * not include elements this vector may store locally as ghost
+ * elements but that are in fact owned by another processor.
+       * As a consequence, if this is a distributed vector, the index
+       * sets returned on different processors will form disjoint sets
+       * that add up to the complete index set.
+ * Obviously, if a vector is created on only one processor, then
+ * the result would satisfy
+ * @code
+ * vec.locally_owned_elements() == complete_index_set (vec.size())
+ * @endcode
+ */
+ IndexSet locally_owned_elements () const;
+
+ /**
+ * Returns the number of ghost elements present on the vector.
*/
- unsigned int n_ghost_entries () const;
+ size_type n_ghost_entries () const;
/**
- * Returns whether the given global index is a
- * ghost index on the present
- * processor. Returns false for indices that
- * are owned locally and for indices not
- * present at all.
+ * Return an index set that describes which elements of this vector are
+ * not owned by the current processor but can be written into or read
+ * from locally (ghost elements).
*/
- bool is_ghost_entry (const size_type global_index) const;
+ const IndexSet& ghost_elements() const;
+
+ /**
+ * Returns whether the given global index is a ghost index on the
+ * present processor. Returns false for indices that are owned locally
+ * and for indices not present at all.
+ */
+ bool is_ghost_entry (const types::global_dof_index global_index) const;
/**
* Make the @p Vector class a bit like the <tt>vector<></tt> class of
//@{
/**
- * Read access to the data in the
- * position corresponding to @p
- * global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
+ * Read access to the data in the position corresponding to @p
+ * global_index. The index must be either in the local range of the
+ * vector or be specified as a ghost index at construction.
+ *
+ * Performance: <tt>O(1)</tt> for locally owned elements that represent
+ * a contiguous range and <tt>O(log(n<sub>ranges</sub>))</tt> for ghost
+ * elements (quite fast, but slower than local_element()).
*/
- Number operator () (const types::global_dof_index global_index) const;
+ Number operator () (const size_type global_index) const;
/**
- * Read and write access to the data
- * in the position corresponding to
- * @p global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
+ * Read and write access to the data in the position corresponding to @p
+ * global_index. The index must be either in the local range of the
+ * vector or be specified as a ghost index at construction.
+ *
+ * Performance: <tt>O(1)</tt> for locally owned elements that represent
+ * a contiguous range and <tt>O(log(n<sub>ranges</sub>))</tt> for ghost
+ * elements (quite fast, but slower than local_element()).
*/
- Number &operator () (const types::global_dof_index global_index);
+ Number &operator () (const size_type global_index);
/**
- * Read access to the data in the
- * position corresponding to @p
- * global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
+ * Read access to the data in the position corresponding to @p
+ * global_index. The index must be either in the local range of the
+ * vector or be specified as a ghost index at construction.
*
- * This function does the same thing
- * as operator().
+ * This function does the same thing as operator().
*/
- Number operator [] (const types::global_dof_index global_index) const;
+ Number operator [] (const size_type global_index) const;
/**
- * Read and write access to the data
- * in the position corresponding to
- * @p global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
+ * Read and write access to the data in the position corresponding to @p
+ * global_index. The index must be either in the local range of the
+ * vector or be specified as a ghost index at construction.
*
- * This function does the same thing
- * as operator().
+ * This function does the same thing as operator().
*/
- Number &operator [] (const types::global_dof_index global_index);
+ Number &operator [] (const size_type global_index);
/**
- * Read access to the data field specified by
- * @p local_index. Locally owned indices can
- * be accessed with indices
- * <code>[0,local_size)</code>, and ghost
- * indices with indices
- * <code>[local_size,local_size+
- * n_ghost_entries]</code>.
+ * Read access to the data field specified by @p local_index. Locally
+ * owned indices can be accessed with indices
+ * <code>[0,local_size)</code>, and ghost indices with indices
+       * <code>[local_size,local_size+n_ghost_entries]</code>.
+ *
+ * Performance: Direct array access (fast).
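+       *
+       * A sketch of a loop over the locally owned range using this fast
+       * path:
+       * @code
+       * for (unsigned int i=0; i<v.local_size(); ++i)
+       *   s += v.local_element(i);
+       * @endcode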
*/
- Number local_element (const unsigned int local_index) const;
+ Number local_element (const size_type local_index) const;
/**
- * Read and write access to the data field
- * specified by @p local_index. Locally owned
- * indices can be accessed with indices
- * <code>[0,local_size)</code>, and ghost
- * indices with indices
+ * Read and write access to the data field specified by @p
+ * local_index. Locally owned indices can be accessed with indices
+ * <code>[0,local_size)</code>, and ghost indices with indices
* <code>[local_size,local_size+n_ghosts]</code>.
+ *
+ * Performance: Direct array access (fast).
*/
- Number &local_element (const unsigned int local_index);
+ Number &local_element (const size_type local_index);
//@}
Vector<Number> &operator -= (const Vector<Number> &V);
/**
- * A collective add operation:
- * This funnction adds a whole
- * set of values stored in @p
- * values to the vector
- * components specified by @p
- * indices.
+       * A collective add operation: This function adds a whole set of values
+ * stored in @p values to the vector components specified by @p indices.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const std::vector<OtherNumber> &values);
/**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
+       * This is a second collective add operation. The difference is that
+       * this function takes a deal.II vector of values.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
const ::dealii::Vector<OtherNumber> &values);
/**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
+ * Take an address where <tt>n_elements</tt> are stored contiguously and
+ * add them into the vector. Handles all cases which are not covered by
+ * the other two <tt>add()</tt> functions above.
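+       *
+       * For instance (a sketch), a call with <tt>n_elements==2</tt> is
+       * equivalent to <tt>v(indices[0]) += values[0]; v(indices[1]) +=
+       * values[1];</tt>.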
*/
template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
const OtherNumber *values);
/**
std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
/**
- * The size that is currently allocated in the
- * val array.
+ * The size that is currently allocated in the val array.
*/
- unsigned int allocated_size;
+ size_type allocated_size;
/**
- * Pointer to the array of
- * local elements of this vector.
+ * Pointer to the array of local elements of this vector.
*/
Number *val;
void clear_mpi_requests ();
/**
- * A helper function that is used to resize
- * the val array.
+ * A helper function that is used to resize the val array.
*/
- void resize_val (const unsigned int new_allocated_size);
+ void resize_val (const size_type new_allocated_size);
/*
- * Make all other vector types
- * friends.
+ * Make all other vector types friends.
*/
template <typename Number2> friend class Vector;
+
+ /**
+ * Make BlockVector type friends.
+ */
+ template <typename Number2> friend class BlockVector;
};
/*@}*/
+ template <typename Number>
+ inline
+ IndexSet
+ Vector<Number>::locally_owned_elements() const
+ {
+ IndexSet is (size());
+
+ const std::pair<types::global_dof_index,types::global_dof_index> x = local_range();
+ is.add_range (x.first, x.second);
+
+ return is;
+ }
+
+
+
template <typename Number>
inline
- unsigned int
+ typename Vector<Number>::size_type
Vector<Number>::n_ghost_entries () const
{
return partitioner->n_ghost_indices();
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const unsigned int n_import_targets = part.import_targets().size();
- const unsigned int n_ghost_targets = part.ghost_targets().size();
+ const size_type n_import_targets = part.import_targets().size();
+ const size_type n_ghost_targets = part.ghost_targets().size();
- AssertDimension (n_ghost_targets+n_import_targets,
- compress_requests.size());
+ if (operation != dealii::VectorOperation::insert)
+ AssertDimension (n_ghost_targets+n_import_targets,
+ compress_requests.size());
// first wait for the receive to complete
- if (n_import_targets > 0)
+ if (compress_requests.size() > 0 && n_import_targets > 0)
{
int ierr;
ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
Assert (ierr == MPI_SUCCESS, ExcInternalError());
Number *read_position = import_data;
- std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ std::vector<std::pair<size_type, size_type> >::const_iterator
my_imports = part.import_indices().begin();
- // If add_ghost_data is set, add the imported
- // data to the local values. If not, set the
- // vector entries.
- if (add_ghost_data == true)
+          // If the operation is not an insertion, add the imported data to the
+          // local values. For insert, nothing is done here (but in debug mode
+          // we assert that the specified value is either zero or matches the
+          // ones already present).
+ if (operation != dealii::VectorOperation::insert)
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+ for (size_type j=my_imports->first; j<my_imports->second; j++)
local_element(j) += *read_position++;
else
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (size_type j=my_imports->first; j<my_imports->second; j++)
- local_element(j) = *read_position++;
- for (unsigned int j=my_imports->first; j<my_imports->second;
++ for (size_type j=my_imports->first; j<my_imports->second;
+ j++, read_position++)
+ Assert(*read_position == 0. ||
+ std::abs(local_element(j) - *read_position) <
+ std::abs(local_element(j)) * 100. *
+ std::numeric_limits<Number>::epsilon(),
+ ExcMessage("Inserted elements do not match."));
AssertDimension(read_position-import_data,part.n_import_indices());
}
class Vector : public VectorBase
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef types::global_dof_index size_type;
+
/**
+ * A variable that indicates whether this vector
+ * supports distributed data storage. If true, then
+ * this vector also needs an appropriate compress()
+       * function that allows recent set or add operations to individual
+       * elements to be communicated
+ * to other processors.
+ *
+ * For the current class, the variable equals
+ * true, since it does support parallel data storage.
+ */
+ static const bool supports_distributed_data = true;
+
+ /**
* Default constructor. Initialize the
* vector as empty.
*/
class Vector : public VectorBase
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef types::global_dof_index size_type;
+
+ /**
+ * A variable that indicates whether this vector
+ * supports distributed data storage. If true, then
+ * this vector also needs an appropriate compress()
+       * function that allows recent set or add operations to individual
+       * elements to be communicated
+ * to other processors.
+ *
+ * For the current class, the variable equals
+ * false, since it does not support parallel data storage.
+ * If you do need parallel data storage, use
+ * PETScWrappers::MPI::Vector.
+ */
+ static const bool supports_distributed_data = false;
+
/**
* Default constructor. Initialize the
* vector as empty.
* in the local range or not,
* see also local_range().
*/
- bool in_local_range (const unsigned int index) const;
+ bool in_local_range (const size_type index) const;
+ /**
+ * Return an index set that describes which elements of this vector
+ * are owned by the current processor. Note that this index set does
+ * not include elements this vector may store locally as ghost
+ * elements but that are in fact owned by another processor.
+       * As a consequence, if this is a distributed vector, the index
+       * sets returned on different processors will form disjoint sets
+       * that add up to the complete index set.
+ * Obviously, if a vector is created on only one processor, then
+ * the result would satisfy
+ * @code
+ * vec.locally_owned_elements() == complete_index_set (vec.size())
+ * @endcode
+ */
+ IndexSet locally_owned_elements () const;
+
/**
* Return if the vector contains ghost
* elements.
&begin, &end);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- return ((index >= static_cast<unsigned int>(begin)) &&
- (index < static_cast<unsigned int>(end)));
+ return ((index >= static_cast<size_type>(begin)) &&
+ (index < static_cast<size_type>(end)));
}
+
+ inline
+ IndexSet
+ VectorBase::locally_owned_elements() const
+ {
+ IndexSet is (size());
+
+ // PETSc only allows for contiguous local ranges, so this is simple
+ const std::pair<unsigned int, unsigned int> x = local_range();
+ is.add_range (x.first, x.second);
+ return is;
+ }
+
+
+
inline
bool
VectorBase::has_ghost_elements() const
class PreconditionChebyshev : public Subscriptor
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef types::global_dof_index size_type;
+
/**
- * Standardized data struct to
- * pipe additional parameters
- * to the preconditioner.
+ * Standardized data struct to pipe additional parameters to the
+ * preconditioner.
*/
struct AdditionalData
{
template <typename OutputVector>
void
solve (const PETScWrappers::MatrixBase &A,
- std::vector<double> &r_eigenvalues,
- std::vector<OutputVector> &r_eigenvectors = std::vector<OutputVector> (),
- const size_type n_eigenvectors = 1);
+ std::vector<double> &eigenvalues,
+ std::vector<OutputVector> &eigenvectors,
- const unsigned int n_eigenpairs = 1);
++ const size_type n_eigenpairs = 1);
/**
* Same as above, but here a composite method for solving the
void
solve (const PETScWrappers::MatrixBase &A,
const PETScWrappers::MatrixBase &B,
- std::vector<double> &r_eigenvalues,
- std::vector<OutputVector> &r_eigenvectors = std::vector<OutputVector> (),
- const size_type n_eigenvectors = 1);
+ std::vector<double> &eigenvalues,
+ std::vector<OutputVector> &eigenvectors,
- const unsigned int n_eigenpairs = 1);
++ const size_type n_eigenpairs = 1);
+
+ /**
+ * Same as above, but here a composite method for solving the
+ * system $A x=\lambda B x$ with real matrices $A, B$ and
+       * complex eigenpairs $x, \lambda$, returned as their real and
+       * imaginary parts.
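+       *
+       * A sketch of a call (the solver type, control object, and number
+       * of requested pairs are hypothetical):
+       * @code
+       * SolverControl control (1000, 1e-9);
+       * SLEPcWrappers::SolverKrylovSchur solver (control);
+       * solver.solve (A, B, re_values, im_values, re_vecs, im_vecs, 5);
+       * @endcode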
+ */
+ template <typename OutputVector>
+ void
+ solve (const PETScWrappers::MatrixBase &A,
+ const PETScWrappers::MatrixBase &B,
+ std::vector<double> &real_eigenvalues,
+ std::vector<double> &imag_eigenvalues,
+ std::vector<OutputVector> &real_eigenvectors,
+ std::vector<OutputVector> &imag_eigenvectors,
+ const unsigned int n_eigenpairs = 1);
/**
* Set the initial vector for the solver.
* This is declared here to make it possible to take a std::vector
* of different PETScWrappers vector types
*/
+ // todo: The logic of these functions can be simplified without breaking backward compatibility...
template <typename OutputVector>
- void
- SolverBase::solve (const PETScWrappers::MatrixBase &A,
- std::vector<double> &kr,
- std::vector<OutputVector> &vr,
- const size_type n_eigenvectors)
- {
- // Panic if the number of eigenpairs wanted is out of bounds.
- AssertThrow ((n_eigenvectors > 0) && (n_eigenvectors <= A.m ()),
- ExcSLEPcWrappersUsageError());
-
- // Set the matrices of the problem
- set_matrices (A);
-
- // and solve
- unsigned int n_converged = 0;
- solve (n_eigenvectors, &n_converged);
-
- if (n_converged > n_eigenvectors)
- n_converged = n_eigenvectors;
- AssertThrow (n_converged == n_eigenvectors,
- ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenvectors));
-
- AssertThrow (vr.size() != 0, ExcSLEPcWrappersUsageError());
- vr.resize (n_converged, vr.front());
- kr.resize (n_converged);
-
- for (size_type index=0; index<n_converged; ++index)
- get_eigenpair (index, kr[index], vr[index]);
- }
-
+ void
+ SolverBase::solve (const PETScWrappers::MatrixBase &A,
+ std::vector<double> &eigenvalues,
+ std::vector<OutputVector> &eigenvectors,
- const unsigned int n_eigenpairs)
++ const size_type n_eigenpairs)
+ {
+ // Panic if the number of eigenpairs wanted is out of bounds.
+ AssertThrow ((n_eigenpairs > 0) && (n_eigenpairs <= A.m ()),
+ ExcSLEPcWrappersUsageError());
+
+ // Set the matrices of the problem
+ set_matrices (A);
+
+ // and solve
- unsigned int n_converged = 0;
++ size_type n_converged = 0;
+ solve (n_eigenpairs, &n_converged);
+
+ if (n_converged > n_eigenpairs)
+ n_converged = n_eigenpairs;
+ AssertThrow (n_converged == n_eigenpairs,
+ ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
+
+ AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError());
+ eigenvectors.resize (n_converged, eigenvectors.front());
+ eigenvalues.resize (n_converged);
+
- for (unsigned int index=0; index<n_converged; ++index)
++ for (size_type index=0; index<n_converged; ++index)
+ get_eigenpair (index, eigenvalues[index], eigenvectors[index]);
+ }
+
template <typename OutputVector>
- void
+ void
SolverBase::solve (const PETScWrappers::MatrixBase &A,
const PETScWrappers::MatrixBase &B,
- std::vector<double> &kr,
- std::vector<OutputVector> &vr,
- const size_type n_eigenvectors)
- {
- // Guard against incompatible matrix sizes:
- AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
- AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
+ std::vector<double> &eigenvalues,
+ std::vector<OutputVector> &eigenvectors,
- const unsigned int n_eigenpairs)
++ const size_type n_eigenpairs)
+ {
+ // Guard against incompatible matrix sizes:
+ AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
+ AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
+
+ // Panic if the number of eigenpairs wanted is out of bounds.
+ AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()),
+ ExcSLEPcWrappersUsageError());
+
+ // Set the matrices of the problem
+ set_matrices (A, B);
+
+ // and solve
- unsigned int n_converged = 0;
++ size_type n_converged = 0;
+ solve (n_eigenpairs, &n_converged);
+
+ if (n_converged>=n_eigenpairs)
+ n_converged = n_eigenpairs;
+
+ AssertThrow (n_converged==n_eigenpairs,
+ ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
+ AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError());
+
+ eigenvectors.resize (n_converged, eigenvectors.front());
+ eigenvalues.resize (n_converged);
+
- for (unsigned int index=0; index<n_converged; ++index)
++ for (size_type index=0; index<n_converged; ++index)
+ get_eigenpair (index, eigenvalues[index], eigenvectors[index]);
+ }
- // Panic if the number of eigenpairs wanted is out of bounds.
- AssertThrow ((n_eigenvectors > 0) && (n_eigenvectors <= A.m ()),
- ExcSLEPcWrappersUsageError());
-
- // Set the matrices of the problem
- set_matrices (A, B);
-
- // and solve
- unsigned int n_converged = 0;
- solve (n_eigenvectors, &n_converged);
-
- if (n_converged >= n_eigenvectors)
- n_converged = n_eigenvectors;
-
- AssertThrow (n_converged == n_eigenvectors,
- ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenvectors));
- AssertThrow (vr.size() != 0, ExcSLEPcWrappersUsageError());
-
- vr.resize (n_converged, vr.front());
- kr.resize (n_converged);
+ template <typename OutputVector>
+ void
+ SolverBase::solve (const PETScWrappers::MatrixBase &A,
+ const PETScWrappers::MatrixBase &B,
+ std::vector<double> &real_eigenvalues,
+ std::vector<double> &imag_eigenvalues,
+ std::vector<OutputVector> &real_eigenvectors,
+ std::vector<OutputVector> &imag_eigenvectors,
- const unsigned int n_eigenpairs)
++ const size_type n_eigenpairs)
+ {
+ // Guard against incompatible matrix sizes:
+ AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
+ AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
+
+ // and incompatible eigenvalue/eigenvector sizes
+ AssertThrow (real_eigenvalues.size() == imag_eigenvalues.size(),
+ ExcDimensionMismatch(real_eigenvalues.size(), imag_eigenvalues.size()));
+    AssertThrow (real_eigenvectors.size() == imag_eigenvectors.size(),
+ ExcDimensionMismatch(real_eigenvectors.size(), imag_eigenvectors.size()));
+
+ // Panic if the number of eigenpairs wanted is out of bounds.
+ AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()),
+ ExcSLEPcWrappersUsageError());
+
+ // Set the matrices of the problem
+ set_matrices (A, B);
+
+ // and solve
- unsigned int n_converged = 0;
++ size_type n_converged = 0;
+ solve (n_eigenpairs, &n_converged);
+
+ if (n_converged>=n_eigenpairs)
+ n_converged = n_eigenpairs;
+
+ AssertThrow (n_converged==n_eigenpairs,
+ ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
+ AssertThrow ((real_eigenvectors.size()!=0) && (imag_eigenvectors.size()!=0),
+ ExcSLEPcWrappersUsageError());
+
+ real_eigenvectors.resize (n_converged, real_eigenvectors.front());
+ imag_eigenvectors.resize (n_converged, imag_eigenvectors.front());
+ real_eigenvalues.resize (n_converged);
+ imag_eigenvalues.resize (n_converged);
+
- for (unsigned int index=0; index<n_converged; ++index)
++ for (size_type index=0; index<n_converged; ++index)
+ get_eigenpair (index,
+ real_eigenvalues[index], imag_eigenvalues[index],
+ real_eigenvectors[index], imag_eigenvectors[index]);
+ }
- for (size_type index=0; index<n_converged; ++index)
- get_eigenpair (index, kr[index], vr[index]);
- }
}
DEAL_II_NAMESPACE_CLOSE
last_action = Insert;
- int *col_index_ptr;
+ TrilinosWrappers::types::int_type *col_index_ptr;
TrilinosScalar *col_value_ptr;
- int n_columns;
+ TrilinosWrappers::types::int_type n_columns;
TrilinosScalar short_val_array[100];
-- int short_index_array[100];
++ TrilinosWrappers::types::int_type short_index_array[100];
std::vector<TrilinosScalar> long_val_array;
-- std::vector<int> long_index_array;
++ std::vector<TrilinosWrappers::types::int_type> long_index_array;
// If we don't elide zeros, the pointers are already available... need to
last_action = Add;
- int *col_index_ptr;
+ TrilinosWrappers::types::int_type *col_index_ptr;
TrilinosScalar *col_value_ptr;
- int n_columns;
+ TrilinosWrappers::types::int_type n_columns;
double short_val_array[100];
-- int short_index_array[100];
++ TrilinosWrappers::types::int_type short_index_array[100];
std::vector<TrilinosScalar> long_val_array;
-- std::vector<int> long_index_array;
++ std::vector<TrilinosWrappers::types::int_type> long_index_array;
// If we don't elide zeros, the pointers are already available... need to
// cast to non-const pointers as that is the format taken by Trilinos (but
Assert (matrix->Filled(), ExcMatrixNotCompressed());
internal::SparseMatrix::check_vector_map_equality(*matrix, src, dst);
- const int dst_local_size = dst.end() - dst.begin();
- AssertDimension (dst_local_size, matrix->RangeMap().NumMyElements());
- const int src_local_size = src.end() - src.begin();
- AssertDimension (src_local_size, matrix->DomainMap().NumMyElements());
+ const size_type dst_local_size = dst.end() - dst.begin();
+ AssertDimension (dst_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
+ const size_type src_local_size = src.end() - src.begin();
+ AssertDimension (src_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
- Epetra_Vector tril_dst (View, matrix->RangeMap(), dst.begin());
- Epetra_Vector tril_src (View, matrix->DomainMap(),
- const_cast<TrilinosScalar *>(src.begin()));
+ Epetra_MultiVector tril_dst (View, matrix->RangeMap(), dst.begin(),
+ matrix->DomainMap().NumMyPoints(), 1);
+ Epetra_MultiVector tril_src (View, matrix->DomainMap(),
+ const_cast<TrilinosScalar *>(src.begin()),
+ matrix->DomainMap().NumMyPoints(), 1);
const int ierr = matrix->Multiply (false, tril_src, tril_dst);
Assert (ierr == 0, ExcTrilinosError(ierr));
Assert (matrix->Filled(), ExcMatrixNotCompressed());
internal::SparseMatrix::check_vector_map_equality(*matrix, dst, src);
- const int dst_local_size = dst.end() - dst.begin();
- AssertDimension (dst_local_size, matrix->DomainMap().NumMyElements());
- const int src_local_size = src.end() - src.begin();
- AssertDimension (src_local_size, matrix->RangeMap().NumMyElements());
+ const size_type dst_local_size = dst.end() - dst.begin();
+ AssertDimension (dst_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
+ const size_type src_local_size = src.end() - src.begin();
+ AssertDimension (src_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
- Epetra_Vector tril_dst (View, matrix->DomainMap(), dst.begin());
- Epetra_Vector tril_src (View, matrix->RangeMap(),
- const_cast<double *>(src.begin()));
+ Epetra_MultiVector tril_dst (View, matrix->DomainMap(), dst.begin(),
+ matrix->DomainMap().NumMyPoints(), 1);
+ Epetra_MultiVector tril_src (View, matrix->RangeMap(),
+ const_cast<double *>(src.begin()),
+ matrix->DomainMap().NumMyPoints(), 1);
const int ierr = matrix->Multiply (true, tril_src, tril_dst);
Assert (ierr == 0, ExcTrilinosError(ierr));
class Vector : public VectorBase
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef dealii::types::global_dof_index size_type;
+
+ /**
+ * A variable that indicates whether this vector
+ * supports distributed data storage. If true, then
+ * this vector also needs an appropriate compress()
+     * function that allows recent set or add operations to individual
+     * elements to be communicated
+ * to other processors.
+ *
+ * For the current class, the variable equals
+ * true, since it does support parallel data storage.
+ */
+ static const bool supports_distributed_data = true;
+
/**
* @name Basic constructors and initialization.
*/
class Vector : public VectorBase
{
public:
+ /**
+ * Declare type for container size.
+ */
+ typedef dealii::types::global_dof_index size_type;
+
+ /**
+ * A variable that indicates whether this vector
+ * supports distributed data storage. If true, then
+ * this vector also needs an appropriate compress()
+     * function that allows recent set or add operations to individual
+     * elements to be communicated
+ * to other processors.
+ *
+ * For the current class, the variable equals
+ * false, since it does not support parallel data storage.
+ * If you do need parallel data storage, use
+ * TrilinosWrappers::MPI::Vector.
+ */
+ static const bool supports_distributed_data = false;
+
/**
* Default constructor that
* generates an empty (zero size)
* @note The same limitation for the applicability of this
* function applies as listed in the documentation of local_range().
*/
- bool in_local_range (const unsigned int index) const;
+ bool in_local_range (const size_type index) const;
+ /**
+ * Return an index set that describes which elements of this vector
+ * are owned by the current processor. Note that this index set does
+ * not include elements this vector may store locally as ghost
+ * elements but that are in fact owned by another processor.
+     * As a consequence, if this is a distributed vector, the index
+     * sets returned on different processors will form disjoint sets
+     * that add up to the complete index set.
+ * Obviously, if a vector is created on only one processor, then
+ * the result would satisfy
+ * @code
+ * vec.locally_owned_elements() == complete_index_set (vec.size())
+ * @endcode
+ */
+ IndexSet locally_owned_elements () const;
+
/**
* Return if the vector contains ghost
* elements. This answer is true if there
* Since this is not a distributed
* vector the method always returns true.
*/
- bool in_local_range (const types::global_dof_index global_index) const;
+ bool in_local_range (const size_type global_index) const;
+ /**
+ * Return an index set that describes which elements of this vector
+ * are owned by the current processor. Note that this index set does
+ * not include elements this vector may store locally as ghost
+ * elements but that are in fact owned by another processor.
+     * As a consequence, if this is a distributed vector, the index
+     * sets returned on different processors will form disjoint sets
+     * that add up to the complete index set.
+ * Obviously, if a vector is created on only one processor, then
+ * the result would satisfy
+ * @code
+ * vec.locally_owned_elements() == complete_index_set (vec.size())
+ * @endcode
+ *
+ * Since the current data type does not support parallel data storage
+ * across different processors, the returned index set is the
+ * complete index set.
+ */
+ IndexSet locally_owned_elements () const;
+
/**
* Return dimension of the vector.
*/
}
template<typename T>
- void copy_subrange (const unsigned int begin,
- const unsigned int end,
+ void copy_subrange (const size_type begin,
+ const size_type end,
const dealii::Vector<T> &src,
- dealii::Vector<T> &dst)
+ dealii::Vector<T> &dst)
{
memcpy(&*(dst.begin()+begin), &*(src.begin()+begin),
(end-begin)*sizeof(T));
}
template<typename T, typename U>
- void copy_subrange (const unsigned int begin,
- const unsigned int end,
+ void copy_subrange (const size_type begin,
+ const size_type end,
const dealii::Vector<T> &src,
- dealii::Vector<U> &dst)
+ dealii::Vector<U> &dst)
{
const T *q = src.begin()+begin;
const T *const end_q = src.begin()+end;
}
template<typename T, typename U>
- void copy_subrange_wrap (const unsigned int begin,
- const unsigned int end,
+ void copy_subrange_wrap (const size_type begin,
+ const size_type end,
const dealii::Vector<T> &src,
- dealii::Vector<U> &dst)
+ dealii::Vector<U> &dst)
{
copy_subrange (begin, end, src, dst);
}
template <typename T, typename U>
void copy_vector (const dealii::Vector<T> &src,
- dealii::Vector<U> &dst)
+ dealii::Vector<U> &dst)
{
- const unsigned int vec_size = src.size();
- const unsigned int dst_size = dst.size();
+ if (PointerComparison::equal(&src, &dst))
+ return;
+
+ const size_type vec_size = src.size();
+ const size_type dst_size = dst.size();
if (dst_size != vec_size)
dst.reinit (vec_size, true);
if (vec_size>internal::Vector::minimum_parallel_grain_size)
/**
* Sizes of the multi-level vectors.
*/
- std::vector<unsigned int> sizes;
+ std::vector<types::global_dof_index> sizes;
/**
- * Sparsity patterns for transfer
- * matrices.
+ * Sparsity patterns for transfer matrices.
*/
std::vector<std_cxx1x::shared_ptr<typename internal::MatrixSelector<VECTOR>::Sparsity> > prolongation_sparsities;
std::vector<std_cxx1x::shared_ptr<typename internal::MatrixSelector<VECTOR>::Matrix> > prolongation_matrices;
/**
- * Mapping for the
- * <tt>copy_to/from_mg</tt>-functions.
- * The data is first the global
- * index, then the level index.
+    * Mapping for the copy_to_mg() and copy_from_mg() functions. Here only
+    * index pairs that are locally owned are stored.
+ *
+ * The data is organized as follows: one vector per level. Each
+ * element of these vectors contains first the global index, then
+ * the level index.
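+    *
+    * A sketch of how such a table is traversed (illustrative only; the
+    * actual loops appear in copy_from_mg() below):
+    * @code
+    * typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
+    * for (unsigned int level=0; level<copy_indices.size(); ++level)
+    *   for (IT i=copy_indices[level].begin(); i!=copy_indices[level].end(); ++i)
+    *     dst(i->first) = src[level](i->second); // global index <- level index
+    * @endcode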
*/
- std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+ std::vector<std::vector<std::pair<types::global_dof_index, unsigned int> > >
copy_indices;
- std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+ /**
+ * Additional degrees of freedom for the copy_to_mg()
+ * function. These are the ones where the global degree of freedom
+ * is locally owned and the level degree of freedom is not.
+ *
+    * Organization of the data is like for #copy_indices.
+ */
- std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
++ std::vector<std::vector<std::pair<types::global_dof_index, unsigned int> > >
+ copy_indices_to_me;
+
+ /**
+ * Additional degrees of freedom for the copy_from_mg()
+ * function. These are the ones where the level degree of freedom
+ * is locally owned and the global degree of freedom is not.
+ *
+    * Organization of the data is like for #copy_indices.
+ */
++ std::vector<std::vector<std::pair<types::global_dof_index, unsigned int> > >
+ copy_indices_from_me;
+
+
/**
* The vector that stores what
* has been given to the
// have fine level basis
// functions
dst = 0;
- for (unsigned int level=0; level<mg_dof_handler.get_tria().n_levels(); ++level)
+ for (unsigned int level=0; level<mg_dof_handler.get_tria().n_global_levels(); ++level)
{
- typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
+ typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
- if (constraints == 0)
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst(i->first) = src[level](i->second);
+ // First copy all indices local to this process
+ if (constraints==0)
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ dst(i->first) = src[level](i->second);
else
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+
+ // Do the same for the indices where the level index is local,
+ // but the global index is not
+ if (constraints==0)
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ dst(i->first) = src[level](i->second);
+ else
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
}
}
// to the coarse level, but
// have fine level basis
// functions
- for (unsigned int level=0; level<mg_dof_handler.get_tria().n_levels(); ++level)
+ for (unsigned int level=0; level<mg_dof_handler.get_tria().n_global_levels(); ++level)
{
- typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
+ typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst(i->first) += src[level](i->second);
+ if (constraints==0)
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ dst(i->first) += src[level](i->second);
+ else
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+
+ // Do the same for the indices where the level index is local,
+ // but the global index is not
+ if (constraints==0)
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ dst(i->first) += src[level](i->second);
+ else
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
}
}
// check if constraints are compatible (see below)
bool constraints_are_compatible = true;
{
-- for (std::map<unsigned int,double>::iterator it=boundary_values.begin();
++ for (std::map<types::global_dof_index,double>::iterator it=boundary_values.begin();
it != boundary_values.end(); ++it)
if (constraints.is_constrained(it->first))
if (!(constraints.get_constraint_entries(it->first)->size() > 0
- template<typename FaceIterator>
+ namespace
+ {
+ // enter constraints for periodicity into the given ConstraintMatrix object.
+ // this function is called when at least one of the two face iterators corresponds
+ // to an active object without further children
+ //
+ // @param transformation A matrix that maps degrees of freedom from one face
+ // to another. If the DoFs on the two faces are supposed to match exactly, then
+    // the matrix so provided will be the identity matrix. If face 2 is once refined
+    // from face 1, then the matrix needs to be the interpolation matrix from a face
+    // to this particular child.
+ //
+ // @precondition: face_1 is supposed to be active
+ template <typename FaceIterator>
+ void
+ set_periodicity_constraints (const FaceIterator &face_1,
+ const typename identity<FaceIterator>::type &face_2,
+ const FullMatrix<double> &transformation,
+ dealii::ConstraintMatrix &constraint_matrix,
+ const ComponentMask &component_mask,
+ const bool face_orientation,
+ const bool face_flip,
+ const bool face_rotation)
+ {
+ static const int dim = FaceIterator::AccessorType::dimension;
+ static const int spacedim = FaceIterator::AccessorType::space_dimension;
+
+ // we should be in the case where face_1 is active, i.e. has no children:
+ Assert (!face_1->has_children(),
+ ExcInternalError());
+
+ Assert (face_1->n_active_fe_indices() == 1,
+ ExcInternalError());
+
+ // if face_2 does have children, then we need to iterate over them
+ if (face_2->has_children())
+ {
+ Assert (face_2->n_children() == GeometryInfo<dim>::max_children_per_face,
+ ExcNotImplemented());
+ const unsigned int dofs_per_face
+ = face_1->get_fe(face_1->nth_active_fe_index(0)).dofs_per_face;
+ FullMatrix<double> child_transformation (dofs_per_face, dofs_per_face);
+ FullMatrix<double> subface_interpolation (dofs_per_face, dofs_per_face);
+ for (unsigned int c=0; c<face_2->n_children(); ++c)
+ {
+ // get the interpolation matrix recursively from the one that
+ // interpolated from face_1 to face_2 by multiplying from the
+ // left with the one that interpolates from face_2 to
+ // its child
+ face_1->get_fe(face_1->nth_active_fe_index(0))
+ .get_subface_interpolation_matrix (face_1->get_fe(face_1->nth_active_fe_index(0)),
+ c,
+ subface_interpolation);
+ subface_interpolation.mmult (child_transformation, transformation);
+ set_periodicity_constraints(face_1, face_2->child(c),
+ child_transformation,
+ constraint_matrix, component_mask,
+ face_orientation, face_flip, face_rotation);
+ }
+ }
+ else
+ // both faces are active. we need to match the corresponding DoFs of both faces
+ {
+ const unsigned int face_1_index = face_1->nth_active_fe_index(0);
+ const unsigned int face_2_index = face_2->nth_active_fe_index(0);
+        Assert(face_1->get_fe(face_1_index) == face_2->get_fe(face_2_index),
+ ExcMessage ("Matching periodic cells need to use the same finite element"));
+
+ const FiniteElement<dim, spacedim> &fe = face_1->get_fe(face_1_index);
+
+ Assert(component_mask.represents_n_components(fe.n_components()),
+ ExcMessage ("The number of components in the mask has to be either "
+ "zero or equal to the number of components in the finite " "element."));
+
+ const unsigned int dofs_per_face = fe.dofs_per_face;
+
- std::vector<unsigned int> dofs_1(dofs_per_face);
- std::vector<unsigned int> dofs_2(dofs_per_face);
++ std::vector<types::global_dof_index> dofs_1(dofs_per_face);
++ std::vector<types::global_dof_index> dofs_2(dofs_per_face);
+
+ face_1->get_dof_indices(dofs_1, face_1_index);
+ face_2->get_dof_indices(dofs_2, face_2_index);
+
+ // Well, this is a hack:
+ //
+ // There is no
+ // face_to_face_index(face_index,
+ // face_orientation,
+ // face_flip,
+ // face_rotation)
+ // function in FiniteElementData, so we have to use
+ // face_to_cell_index(face_index, face,
+ // face_orientation,
+ // face_flip,
+ // face_rotation)
+ // But this will give us an index on a cell, something we cannot work
+ // with directly. Luckily we can match them back :-]
+
+ std::map<unsigned int, unsigned int> cell_to_rotated_face_index;
+
+ // Build up a cell to face index for face_2:
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int cell_index = fe.face_to_cell_index(i, 0, /* It doesn't really matter, just assume
+ * we're on the first face...
+ */
+ true, false, false // default orientation
+ );
+ cell_to_rotated_face_index[cell_index] = i;
+ }
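+ // (as an example of how this inverse map is used: if, for the default
+ // orientation, fe.face_to_cell_index(3, 0, true, false, false) were 11,
+ // then cell_to_rotated_face_index[11] == 3, and below we can translate
+ // the cell index of a rotated face DoF back into a face index)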
+
+ // loop over all dofs on face 2 and constrain them against the ones on face 1
+ for (unsigned int i=0; i<dofs_per_face; ++i)
+ if (!constraint_matrix.is_constrained(dofs_2[i]))
+ if ((component_mask.n_selected_components(fe.n_components())
+ == fe.n_components())
+ ||
+ component_mask[fe.face_system_to_component_index(i).first])
+ {
+ constraint_matrix.add_line(dofs_2[i]);
+ for (unsigned int jj=0; jj<dofs_per_face; ++jj)
+ {
+ // Query the correct face_index on face_2 respecting the given
+ // orientation:
+ const unsigned int j =
+ cell_to_rotated_face_index[fe.face_to_cell_index(jj, 0, /* It doesn't really matter, just assume
+ * we're on the first face...
+ */
+ face_orientation, face_flip, face_rotation)];
+
+ // And finally constrain the two DoFs respecting component_mask:
+ if (transformation(i,j) != 0)
+ constraint_matrix.add_entry(dofs_2[i], dofs_1[j],
+ transformation(i,j));
+ }
+ }
+ }
+ }
+ }
+
+
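+ // The helper above is driven by the function below. A typical
+ // (purely illustrative) call, for two boundary faces that have been
+ // identified as periodic partners in their default orientation, is
+ // @code
+ // DoFTools::make_periodicity_constraints (face_1, face_2,
+ // constraint_matrix,
+ // ComponentMask(),
+ // /*face_orientation=*/ true,
+ // /*face_flip=*/ false,
+ // /*face_rotation=*/ false);
+ // @endcode
+ //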
+ template <typename FaceIterator>
void
make_periodicity_constraints (const FaceIterator &face_1,
const typename identity<FaceIterator>::type &face_2,
Assert (col_entry != weights[first_used_row].end(), ExcInternalError());
if ((col_entry->second == 1) &&
-- (representants[first_used_row] == static_cast<int>(global_dof)))
++ (representants[first_used_row] == global_dof))
// dof unconstrained or constrained to itself (in case this
// cell is mapped to itself, rather than to children of
// itself)
// explicit instantiations
template
void ChunkSparsityPattern::copy_from<SparsityPattern> (const SparsityPattern &,
-- const unsigned int,
++ const size_type,
const bool);
template
void ChunkSparsityPattern::copy_from<CompressedSparsityPattern> (const CompressedSparsityPattern &,
}
void
- SolverBase::solve (const unsigned int n_eigenvectors,
- unsigned int *n_converged)
+ SolverBase::solve (const size_type n_eigenpairs,
+ size_type *n_converged)
{
int ierr;
// get number of converged eigenstates
ierr = EPSGetConverged (solver_data->eps,
- reinterpret_cast<PetscInt *>(n_converged)
- );
+ reinterpret_cast<PetscInt *>(n_converged));
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
- int n_iterations = 0;
+ PetscInt n_iterations = 0;
double residual_norm = 1e300;
// @todo Investigate elaborating on some of this to act on the
}
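// After solve() has run, the converged eigenpairs can be retrieved one
// by one. An illustrative sketch (the solver setup and the vector size n
// are assumptions of ours, not shown in this patch):
// @code
// double lambda;
// PETScWrappers::Vector x (n);
// eigensolver.get_eigenpair (0, lambda, x);
// @endcode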
void
- SolverBase::get_eigenpair (const unsigned int index,
+ SolverBase::get_eigenpair (const size_type index,
- double &kr,
- PETScWrappers::VectorBase &vr)
+ double &eigenvalues,
+ PETScWrappers::VectorBase &eigenvectors)
{
AssertThrow (solver_data.get() != 0, ExcSLEPcWrappersUsageError());
// element will be stored
CompressedSimpleSparsityPattern csp (sizes[level+1],
sizes[level]);
- std::vector<unsigned int> entries (dofs_per_cell);
+ std::vector<types::global_dof_index> entries (dofs_per_cell);
for (typename DoFHandler<dim,spacedim>::cell_iterator cell=mg_dof.begin(level);
cell != mg_dof.end(level); ++cell)
- if (cell->has_children())
+ if (cell->has_children() &&
+ ( mg_dof.get_tria().locally_owned_subdomain()==numbers::invalid_subdomain_id
+ || cell->level_subdomain_id()==mg_dof.get_tria().locally_owned_subdomain()
+ ))
{
cell->get_mg_dof_indices (dof_indices_parent);
true);
}
}
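+ // in a parallel computation, compress() exchanges the entries written
+ // into non-locally-owned rows above; VectorOperation::insert indicates
+ // that values were set rather than added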
+ prolongation_matrices[level]->compress(VectorOperation::insert);
}
-
- // impose boundary conditions
- // but only in the column of
- // the prolongation matrix
- if (mg_constrained_dofs != 0)
- if (mg_constrained_dofs->set_boundary_values())
- {
- std::vector<types::global_dof_index> constrain_indices;
- for (int level=n_levels-2; level>=0; --level)
- {
- if (mg_constrained_dofs->get_boundary_indices()[level].size() == 0)
- continue;
-
- // need to delete all the columns in the
- // matrix that are on the boundary. to achieve
- // this, create an array as long as there are
- // matrix columns, and find which columns we
- // need to filter away.
- constrain_indices.resize (0);
- constrain_indices.resize (prolongation_matrices[level]->n(), 0);
- std::set<types::global_dof_index>::const_iterator dof
- = mg_constrained_dofs->get_boundary_indices()[level].begin(),
- endd = mg_constrained_dofs->get_boundary_indices()[level].end();
- for (; dof != endd; ++dof)
- constrain_indices[*dof] = 1;
-
- const types::global_dof_index n_dofs = prolongation_matrices[level]->m();
- for (types::global_dof_index i=0; i<n_dofs; ++i)
- {
- typename internal::MatrixSelector<VECTOR>::Matrix::iterator
- start_row = prolongation_matrices[level]->begin(i),
- end_row = prolongation_matrices[level]->end(i);
- for (; start_row != end_row; ++start_row)
- {
- if (constrain_indices[start_row->column()] == 1)
- start_row->value() = 0;
- }
- }
- }
- }
-
- // to find the indices that describe the
- // relation between global dofs and local
- // numbering on the individual level, first
- // create a temp vector where the ith level
- // entry contains the respective global
- // entry. this gives a neat way to find those
- // indices. in a second step, actually build
- // the std::vector<std::pair<uint,uint> > that
- // only contains the active dofs on the
- // levels.
+ // Now we fill the variables copy_indices*, which are essentially
+ // maps from global to mg dofs for each level, stored as a std::vector
+ // of pairs. We need to split this map on each level depending on the
+ // ownership of the global and mg dof, so that we do not later access
+ // non-local elements in copy_to/from_mg.
+ // Here we keep track in the bit field dof_touched which global dof has
+ // already been processed (otherwise we would get duplicates on each
+ // level and on different levels). Note that it is important that we
+ // iterate over the levels starting from 0, so that mg dofs on coarser
+ // levels "win".
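+ // As a concrete (illustrative) example: if global dof 42 and its level
+ // dof on level 2 are both locally owned, the pair goes into
+ // copy_indices[2]; if only the level dof is locally owned, it goes into
+ // copy_indices_from_me[2]; if only the global dof is locally owned,
+ // into copy_indices_to_me[2].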
copy_indices.resize(n_levels);
- std::vector<types::global_dof_index> temp_copy_indices;
+ copy_indices_from_me.resize(n_levels);
+ copy_indices_to_me.resize(n_levels);
+ IndexSet globally_relevant;
+ DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant);
+ std::vector<bool> dof_touched(globally_relevant.n_elements(), false);
+
- std::vector<unsigned int> global_dof_indices (dofs_per_cell);
- std::vector<unsigned int> level_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> global_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> level_dof_indices (dofs_per_cell);
- for (int level=mg_dof.get_tria().n_levels()-1; level>=0; --level)
+ for (unsigned int level=0; level<mg_dof.get_tria().n_levels(); ++level)
{
copy_indices[level].clear();
+ copy_indices_from_me[level].clear();
+ copy_indices_to_me[level].clear();
+
typename DoFHandler<dim,spacedim>::active_cell_iterator
level_cell = mg_dof.begin_active(level);
const typename DoFHandler<dim,spacedim>::active_cell_iterator
for (unsigned int i=0; i<dofs_per_cell; ++i)
{
- if (mg_constrained_dofs != 0)
- {
- if (!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i]))
- temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
- }
+ // we need to ignore the DoF if it sits on a refinement edge (hanging node)
+ if (mg_constrained_dofs != 0
+ && mg_constrained_dofs->at_refinement_edge(level, level_dof_indices[i]))
+ continue;
+
+ types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]);
+ // skip if we have already handled this global dof (on this or a coarser level)
+ if (dof_touched[global_idx])
+ continue;
+
+ bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]);
+ bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]);
+
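+ // four cases are possible: both the global and the level dof are owned
+ // here (the fast path copy_indices), only the level dof is owned here
+ // (we will have to ship the value: copy_indices_from_me), only the
+ // global dof is owned here (we will receive it: copy_indices_to_me),
+ // or neither (some other processor handles this dof entirely)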
+ if (global_mine && level_mine)
+ copy_indices[level].push_back(
+ std::pair<types::global_dof_index, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
+ else if (level_mine)
+ copy_indices_from_me[level].push_back(
+ std::pair<types::global_dof_index, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
+ else if (global_mine)
+ copy_indices_to_me[level].push_back(
+ std::pair<types::global_dof_index, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
else
- temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
+ continue;
+
+ dof_touched[global_idx] = true;
}
}
-
- // now all the active dofs got a valid entry,
- // the other ones have an invalid entry. Count
- // the invalid entries and then resize the
- // copy_indices object. Then, insert the pairs
- // of global index and level index into
- // copy_indices.
- const types::global_dof_index n_active_dofs =
- std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
- std::bind2nd(std::not_equal_to<types::global_dof_index>(),
- numbers::invalid_dof_index));
- copy_indices[level].resize (n_active_dofs);
- types::global_dof_index counter = 0;
- for (types::global_dof_index i=0; i<temp_copy_indices.size(); ++i)
- if (temp_copy_indices[i] != numbers::invalid_dof_index)
- copy_indices[level][counter++] =
- std::pair<types::global_dof_index, unsigned int> (temp_copy_indices[i], i);
- Assert (counter == n_active_dofs, ExcInternalError());
}
- std::less<std::pair<unsigned int, unsigned int> > compare;
+
+ // If we are in debugging mode, we order the copy indices, so we get
+ // more reliable output for regression tests
+ #ifdef DEBUG
++ std::less<std::pair<types::global_dof_index, unsigned int> > compare;
+ for (unsigned int level=0; level<copy_indices.size(); ++level)
+ std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
+ for (unsigned int level=0; level<copy_indices_from_me.size(); ++level)
+ std::sort(copy_indices_from_me[level].begin(), copy_indices_from_me[level].end(), compare);
+ for (unsigned int level=0; level<copy_indices_to_me.size(); ++level)
+ std::sort(copy_indices_to_me[level].begin(), copy_indices_to_me[level].end(), compare);
+ #endif
}