--- /dev/null
+New: TrilinosWrappers::MPI::Vector and PETScWrappers::MPI::Vector now both have
+reinit functions that take a Utilities::MPI::Partitioner as an argument, so
+their interface is compatible with that of LinearAlgebra::distributed::Vector.
+<br>
+(Marc Fehling, 2022/12/05)
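
A minimal usage sketch of the now-uniform interface (not part of the patch; it
assumes deal.II is configured with both PETSc and Trilinos, and that the index
sets are obtained elsewhere, e.g. from a DoFHandler):

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/partitioner.h>
  #include <deal.II/lac/la_parallel_vector.h>
  #include <deal.II/lac/petsc_vector.h>
  #include <deal.II/lac/trilinos_vector.h>

  using namespace dealii;

  // The two IndexSets describe the locally owned and the ghost (locally
  // relevant) degrees of freedom of the calling MPI process.
  void initialize_vectors(const IndexSet &locally_owned_dofs,
                          const IndexSet &locally_relevant_dofs,
                          LinearAlgebra::distributed::Vector<double> &v_dealii,
                          PETScWrappers::MPI::Vector                 &v_petsc,
                          TrilinosWrappers::MPI::Vector              &v_trilinos)
  {
    const auto partitioner =
      std::make_shared<const Utilities::MPI::Partitioner>(locally_owned_dofs,
                                                          locally_relevant_dofs,
                                                          MPI_COMM_WORLD);

    // The same Partitioner object can now initialize all three vector types.
    v_dealii.reinit(partitioner);
    v_petsc.reinit(partitioner, /*make_ghosted=*/true);
    v_trilinos.reinit(partitioner, /*make_ghosted=*/true);
  }
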
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
const MPI_Comm &comm_sm = MPI_COMM_SELF);
+ /**
+ * This function exists purely for reasons of compatibility with the
+ * PETScWrappers::MPI::Vector and TrilinosWrappers::MPI::Vector classes.
+ *
+ * It calls the function above, and ignores the parameter @p make_ghosted.
+ */
+ void
+ reinit(
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const bool make_ghosted,
+ const MPI_Comm &comm_sm = MPI_COMM_SELF);
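+
+  // Sketch (not part of the library) of what this compatibility overload
+  // enables: code templated on the vector type can pass @p make_ghosted
+  // unconditionally. The helper name and VectorType are placeholders.
+  //
+  //   template <typename VectorType>
+  //   void reinit_from_partitioner(
+  //     VectorType                                                &vec,
+  //     const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
+  //   {
+  //     // Compiles for LinearAlgebra::distributed::Vector<Number>,
+  //     // PETScWrappers::MPI::Vector, and TrilinosWrappers::MPI::Vector.
+  //     vec.reinit(partitioner, /*make_ghosted=*/true);
+  //   }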
+
/**
* Initialize vector with @p local_size locally-owned and @p ghost_size
 * ghost degrees of freedom.
+ template <typename Number, typename MemorySpaceType>
+ void
+ Vector<Number, MemorySpaceType>::reinit(
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in,
+ const bool /*make_ghosted*/,
+ const MPI_Comm &comm_sm)
+ {
+ this->reinit(partitioner_in, comm_sm);
+ }
+
+
+
template <typename Number, typename MemorySpaceType>
Vector<Number, MemorySpaceType>::Vector()
: partitioner(std::make_shared<Utilities::MPI::Partitioner>())
/**
 * Initialize the vector according to the parallel partitioning described
 * in @p partitioner.
+ *
+ * You can decide whether your vector will contain ghost elements with
+ * @p make_ghosted.
*/
void
reinit(
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const bool make_ghosted = true);
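+
+  // Usage sketch (not part of the library; 'partitioner' is assumed to be
+  // set up elsewhere and to provide ghost indices):
+  //
+  //   PETScWrappers::MPI::Vector ghosted, owned_only;
+  //   ghosted.reinit(partitioner);                            // ghosted (default)
+  //   owned_only.reinit(partitioner, /*make_ghosted=*/false); // locally owned only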
/**
* Return a reference to the MPI communicator object in use with this
/**
 * Initialize the vector according to the parallel partitioning described
 * in @p partitioner, using the reinit() function above.
+ *
+ * You can decide whether your vector will contain ghost elements with
+ * @p make_ghosted.
+ *
+ * The parameter @p vector_writable only has effect on ghosted vectors
+ * and is ignored for non-ghosted vectors.
*/
void
reinit(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const bool make_ghosted = true,
const bool vector_writable = false);
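
  // Usage sketch (not part of the library; 'partitioner' is assumed to be
  // set up elsewhere and to provide ghost indices):
  //
  //   TrilinosWrappers::MPI::Vector read_ghosted, write_ghosted;
  //   read_ghosted.reinit(partitioner);               // ghost entries read-only
  //   write_ghosted.reinit(partitioner,
  //                        /*make_ghosted=*/true,
  //                        /*vector_writable=*/true); // ghost entries writable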
/**
void
Vector::reinit(
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const bool make_ghosted)
{
- this->reinit(partitioner->locally_owned_range(),
- partitioner->ghost_indices(),
- partitioner->get_mpi_communicator());
+ if (make_ghosted)
+ {
+ Assert(partitioner->ghost_indices_initialized(),
+ ExcMessage("You asked to create a ghosted vector, but the "
+ "partitioner does not provide ghost indices."));
+
+ this->reinit(partitioner->locally_owned_range(),
+ partitioner->ghost_indices(),
+ partitioner->get_mpi_communicator());
+ }
+ else
+ {
+ this->reinit(partitioner->locally_owned_range(),
+ partitioner->get_mpi_communicator());
+ }
}
void
Vector::reinit(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const bool make_ghosted,
const bool vector_writable)
{
- this->reinit(partitioner->locally_owned_range(),
- partitioner->ghost_indices(),
- partitioner->get_mpi_communicator(),
- vector_writable);
+ if (make_ghosted)
+ {
+ Assert(partitioner->ghost_indices_initialized(),
+ ExcMessage("You asked to create a ghosted vector, but the "
+ "partitioner does not provide ghost indices."));
+
+ this->reinit(partitioner->locally_owned_range(),
+ partitioner->ghost_indices(),
+ partitioner->get_mpi_communicator(),
+ vector_writable);
+ }
+ else
+ {
+ this->reinit(partitioner->locally_owned_range(),
+ partitioner->get_mpi_communicator());
+ }
}