<h3>Specific improvements</h3>
<ol>
+ <li>
+ Changed: parallel::distributed::BlockVector::operator= now allows importing
+ of ghost values like all other vector types. Also added new constructors
+ for BlockVector and Vector using IndexSets to mirror the other linear
+ algebra classes.
+ <br>
+ (Timo Heister, 2013/09/04)
+ </li>
+
<li>
Fixed: VectorTools::compute_no_normal_flux_constraints had a bug that
only manifested on complex meshes. This is now fixed.
*/
BlockVector (const std::vector<size_type> &block_sizes);
+ /**
+ * Construct a block vector with an IndexSet for the local range
+ * and ghost entries for each block.
+ */
+ BlockVector (const std::vector<IndexSet> &local_ranges,
+ const std::vector<IndexSet> &ghost_indices,
+ const MPI_Comm communicator);
+
+ /**
+ * Same as above, but the ghost indices are assumed to be empty.
+ */
+ BlockVector (const std::vector<IndexSet> &local_ranges,
+ const MPI_Comm communicator);
+
/**
* Destructor. Clears memory.
*/
}
+ template <typename Number>
+ inline
+ BlockVector<Number>::BlockVector (const std::vector<IndexSet> &local_ranges,
+ const std::vector<IndexSet> &ghost_indices,
+ const MPI_Comm communicator)
+ {
+ std::vector<size_type> sizes(local_ranges.size());
+ for (unsigned int i=0; i<local_ranges.size(); ++i)
+ sizes[i] = local_ranges[i].size();
+
+ this->block_indices.reinit(sizes);
+ this->components.resize(this->n_blocks());
+
+ for (unsigned int i=0; i<this->n_blocks(); ++i)
+ this->block(i).reinit(local_ranges[i], ghost_indices[i], communicator);
+ }
+
+
+ template <typename Number>
+ inline
+ BlockVector<Number>::BlockVector (const std::vector<IndexSet> &local_ranges,
+ const MPI_Comm communicator)
+ {
+ std::vector<size_type> sizes(local_ranges.size());
+ for (unsigned int i=0; i<local_ranges.size(); ++i)
+ sizes[i] = local_ranges[i].size();
+
+ this->block_indices.reinit(sizes);
+ this->components.resize(this->n_blocks());
+
+ for (unsigned int i=0; i<this->n_blocks(); ++i)
+ this->block(i).reinit(local_ranges[i], communicator);
+ }
+
+
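The two constructors above give BlockVector the same IndexSet-based interface
as the other parallel vector classes. A minimal usage sketch (the index sets
owned_per_block/relevant_per_block and the use of MPI_COMM_WORLD are
illustrative assumptions, not part of this patch):

    // Per-block index sets, assumed to be filled elsewhere (e.g. from a
    // DoFHandler): owned_per_block[i] holds the locally owned rows of block i,
    // relevant_per_block[i] additionally the ghost rows needed locally.
    std::vector<IndexSet> owned_per_block;
    std::vector<IndexSet> relevant_per_block;

    // Ghosted block vector: locally owned plus ghost entries per block.
    parallel::distributed::BlockVector<double>
      ghosted (owned_per_block, relevant_per_block, MPI_COMM_WORLD);

    // Without the second argument, no ghost entries are allocated.
    parallel::distributed::BlockVector<double>
      owned_only (owned_per_block, MPI_COMM_WORLD);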
template <typename Number>
inline
BlockVector<Number> &
BlockVector<Number>::operator = (const BlockVector &v)
{
- reinit (v, true);
- BaseClass::operator = (v);
+ // we only allow assignment to vectors with the same number of blocks
+ // or to an empty BlockVector
+ Assert (this->n_blocks() == 0 || this->n_blocks() == v.n_blocks(),
+ ExcDimensionMismatch(this->n_blocks(), v.n_blocks()));
+
+ if (this->n_blocks() != v.n_blocks())
+ reinit(v.n_blocks(), true);
+
+ for (size_type i=0; i<this->n_blocks(); ++i)
+ this->components[i] = v.block(i);
+
+ this->collect_sizes();
return *this;
}
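With the reworked operator= above, assignment can also be used to bring ghost
values into a vector that stores them, as the changelog entry states. A short
sketch (vector names and index sets are assumptions for illustration):

    // 'solution' stores only its locally owned entries; 'ghosted' also
    // allocates the ghost rows listed in relevant_per_block.
    parallel::distributed::BlockVector<double>
      solution (owned_per_block, MPI_COMM_WORLD);
    parallel::distributed::BlockVector<double>
      ghosted (owned_per_block, relevant_per_block, MPI_COMM_WORLD);

    // ... assemble and solve into 'solution' ...

    // Copies the locally owned values block by block; per the changelog
    // entry, ghost values are imported as for the other vector types.
    ghosted = solution;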
const IndexSet &ghost_indices,
const MPI_Comm communicator);
+ /**
+ * Same constructor as above but without any ghost indices.
+ */
+ Vector (const IndexSet &local_range,
+ const MPI_Comm communicator);
+
/**
* Create the vector based on the parallel partitioning described in @p
* partitioner. The input argument is a shared pointer, which stores the
const IndexSet &ghost_indices,
const MPI_Comm communicator);
+ /**
+ * Same as above, but without ghost entries.
+ */
+ void reinit (const IndexSet &local_range,
+ const MPI_Comm communicator);
+
/**
* Initialize the vector given to the parallel partitioning described in
* @p partitioner. The input argument is a shared pointer, which stores
+ template <typename Number>
+ inline
+ Vector<Number>::Vector (const IndexSet &local_range,
+ const MPI_Comm communicator)
+ :
+ allocated_size (0),
+ val (0),
+ import_data (0),
+ vector_view (0, static_cast<Number *>(0))
+ {
+ IndexSet ghost_indices(local_range.size());
+ reinit (local_range, ghost_indices, communicator);
+ }
+
+
+
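The constructor above simply forwards to reinit() with an empty ghost set. A
usage sketch for the non-ghosted case (the global size and local range are
assumed values for illustration):

    // Index set describing the locally owned range of a global vector.
    IndexSet locally_owned (n_global_dofs);              // assumed global size
    locally_owned.add_range (local_begin, local_end);    // assumed local range

    // Distributed vector holding only the locally owned entries.
    parallel::distributed::Vector<double> vec (locally_owned, MPI_COMM_WORLD);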
template <typename Number>
inline
Vector<Number>::Vector (const size_type size)
const IndexSet &ghost_indices,
const MPI_Comm communicator)
{
- // set up parallel partitioner with index sets
- // and communicator
+ // set up parallel partitioner with index sets and communicator
+ std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner
+ (new Utilities::MPI::Partitioner (locally_owned_indices,
+ ghost_indices, communicator));
+ reinit (new_partitioner);
+ }
+
+
+
+ template <typename Number>
+ void
+ Vector<Number>::reinit (const IndexSet &locally_owned_indices,
+ const MPI_Comm communicator)
+ {
+ // set up parallel partitioner with index sets and communicator
+ IndexSet ghost_indices(locally_owned_indices.size());
std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner
(new Utilities::MPI::Partitioner (locally_owned_indices,
ghost_indices, communicator));