void reinit (const std::vector<IndexSet> &parallel_partitioning,
const MPI_Comm &communicator = MPI_COMM_WORLD,
const bool fast = false);
+ /**
+ * Like above, but with a second set of indices for
+ * ghost entries.
+ */
+ void reinit (const std::vector<IndexSet> &parallel_partitioning,
+ const std::vector<IndexSet> &ghost_values,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
/**
* Change the dimension to that
collect_sizes();
}
+ void
+ BlockVector::reinit (const std::vector<IndexSet> &parallel_partitioning,
+ const std::vector<IndexSet> &ghost_values,
+ const MPI_Comm &communicator)
+ {
+ const size_type no_blocks = parallel_partitioning.size();
+ std::vector<size_type> block_sizes (no_blocks);
+
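+ // the global size of each block is the size of the index space
+ // described by its IndexSet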
+ for (size_type i=0; i<no_blocks; ++i)
+ {
+ block_sizes[i] = parallel_partitioning[i].size();
+ }
+
+ this->block_indices.reinit (block_sizes);
+ if (components.size() != n_blocks())
+ components.resize(n_blocks());
+
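+ // reinitialize each block with its locally owned and ghost index sets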
+ for (size_type i=0; i<n_blocks(); ++i)
+ components[i].reinit(parallel_partitioning[i], ghost_values[i], communicator);
+
+ collect_sizes();
+ }
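
For reference, a minimal sketch of how the new ghosted reinit overload could be
called from user code. The block count, global sizes, index ranges, and the
TrilinosWrappers::MPI::BlockVector type are illustrative assumptions and not
part of this patch; only the reinit signature itself is taken from the change
above.

    #include <deal.II/base/index_set.h>

    // Assumed vector class exposing the new overload (assumption; the patch
    // does not name the enclosing namespace).
    TrilinosWrappers::MPI::BlockVector vec;

    std::vector<IndexSet> owned(2), ghosted(2);

    // Block 0: 100 global entries, this process owns [0,50).
    owned[0] = IndexSet(100);
    owned[0].add_range(0, 50);

    // Block 1: 40 global entries, this process owns [0,20).
    owned[1] = IndexSet(40);
    owned[1].add_range(0, 20);

    // Ghost entries this process reads but does not own (made-up ranges).
    ghosted[0] = IndexSet(100);
    ghosted[0].add_range(50, 55);
    ghosted[1] = IndexSet(40);
    ghosted[1].add_range(20, 22);

    // Reinitialize the block vector with owned and ghost indices per block.
    vec.reinit(owned, ghosted, MPI_COMM_WORLD);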
void