* Same as above but the ghost indices are assumed to be empty.
*/
BlockVector(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
+
+ /**
+ * Construct a block vector with a Utilities::MPI::Partitioner for each
+ * block.
+ *
+ * The optional argument @p comm_sm, which consists of processes on
+ * the same shared-memory domain, allows users to have read-only access
+ * to both locally-owned and ghost values of processes combined in the
+ * shared-memory communicator. See the general documentation of the
+ * LinearAlgebra::distributed::Vector class for more information about
+ * this argument.
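+ *
+ * As a rough usage sketch (the index sets @p owned_indices and
+ * @p ghost_indices as well as @p mpi_communicator are placeholders
+ * assumed to be set up elsewhere):
+ * @code
+ * // owned_indices, ghost_indices, and mpi_communicator are placeholders.
+ * std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ *   partitioners;
+ * for (unsigned int b = 0; b < owned_indices.size(); ++b)
+ *   partitioners.push_back(
+ *     std::make_shared<Utilities::MPI::Partitioner>(owned_indices[b],
+ *                                                   ghost_indices[b],
+ *                                                   mpi_communicator));
+ *
+ * LinearAlgebra::distributed::BlockVector<double> block_vector(partitioners);
+ * @endcode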
+ */
+ BlockVector(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const MPI_Comm comm_sm = MPI_COMM_SELF);
+
/**
* Destructor.
*
*/
void
reinit(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
+
+ /**
+ * Initialize each block with the corresponding parallel partitioning
+ * in @p partitioners. The input arguments are shared pointers, which
+ * store the partitioner data only once and can be shared between several
+ * vectors with the same layout.
+ *
+ * The optional argument @p comm_sm, which consists of processes on
+ * the same shared-memory domain, allows users to have read-only access
+ * to both locally-owned and ghost values of processes combined in the
+ * shared-memory communicator. See the general documentation of the
+ * LinearAlgebra::distributed::Vector class for more information about
+ * this argument.
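+ *
+ * As one possible sketch, an existing block vector @p v1 (a placeholder
+ * name, assumed to be initialized already) can lend its partitioners to
+ * a second vector so that both share the same parallel layout:
+ * @code
+ * // v1 is a placeholder for an already initialized BlockVector.
+ * std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ *   partitioners;
+ * for (unsigned int b = 0; b < v1.n_blocks(); ++b)
+ *   partitioners.push_back(v1.block(b).get_partitioner());
+ *
+ * LinearAlgebra::distributed::BlockVector<double> v2;
+ * v2.reinit(partitioners);
+ * @endcode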
+ */
+ void
+ reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const MPI_Comm comm_sm = MPI_COMM_SELF);
+
/**
* This function copies the data that has accumulated in the data buffer
* for ghost indices to the owning processor. For the meaning of the