--- /dev/null
+New: LinearAlgebra::distributed::BlockVector now has a constructor and a
+reinit() function that accept a vector of Utilities::MPI::Partitioner
+objects, one for each block.
+<br>
+(Marc Fehling, 2022/12/20)
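As a minimal sketch of the new interface (the index sets owned[b] and
relevant[b], the block count n_blocks, and the communicator comm are
placeholders for the caller's own setup):

  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
  for (unsigned int b = 0; b < n_blocks; ++b)
    partitioners.push_back(
      std::make_shared<const Utilities::MPI::Partitioner>(owned[b],
                                                          relevant[b],
                                                          comm));

  // Either construct a block vector directly from the partitioners ...
  LinearAlgebra::distributed::BlockVector<double> v(partitioners);

  // ... or reinitialize an existing block vector with them.
  v.reinit(partitioners);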
BlockVector(const std::vector<IndexSet> &local_ranges,
const MPI_Comm & communicator);
+ /**
+ * Construct a block vector with a Utilities::MPI::Partitioner for each
+ * block.
+ *
+ * The optional argument @p comm_sm, which consists of processes on
+ * the same shared-memory domain, allows users to have read-only access to
+ * both locally-owned and ghost values of processes combined in the
+ * shared-memory communicator. See the general documentation of the
+ * LinearAlgebra::distributed::Vector class for more information about
+ * this argument.
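+ *
+ * A minimal sketch (assuming @p partitioners has already been filled with
+ * one entry per block); a shared-memory communicator for the second
+ * argument can, for instance, be obtained via MPI_Comm_split_type():
+ * @code
+ * MPI_Comm comm_sm;
+ * MPI_Comm_split_type(
+ *   MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm_sm);
+ *
+ * LinearAlgebra::distributed::BlockVector<double> v(partitioners, comm_sm);
+ * @endcode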
+ */
+ BlockVector(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const MPI_Comm &comm_sm = MPI_COMM_SELF);
+
/**
* Destructor.
*
reinit(const std::vector<IndexSet> &local_ranges,
const MPI_Comm & communicator);
+ /**
+ * Initialize each block with the corresponding parallel partitioning
+ * in @p partitioners. The input arguments are shared pointers, which
+ * store the partitioner data only once and can be shared between several
+ * vectors with the same layout.
+ *
+ * The optional argument @p comm_sm, which consists of processes on
+ * the same shared-memory domain, allows users to have read-only access to
+ * both locally-owned and ghost values of processes combined in the
+ * shared-memory communicator. See the general documentation of the
+ * LinearAlgebra::distributed::Vector class for more information about
+ * this argument.
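+ *
+ * A minimal sketch (assuming @p partitioners has already been filled with
+ * one entry per block): because the partitioners are shared pointers,
+ * several vectors can reuse the same objects, e.g.
+ * @code
+ * LinearAlgebra::distributed::BlockVector<double> v, w;
+ * v.reinit(partitioners);
+ * w.reinit(partitioners); // shares the same partitioner objects as v
+ * @endcode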
+ */
+ void
+ reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const MPI_Comm &comm_sm = MPI_COMM_SELF);
+
/**
* This function copies the data that has accumulated in the data buffer
* for ghost indices to the owning processor. For the meaning of the
+ template <typename Number>
+ BlockVector<Number>::BlockVector(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const MPI_Comm &comm_sm)
+ {
+ reinit(partitioners, comm_sm);
+ }
+
+
+
template <typename Number>
BlockVector<Number>::BlockVector(const BlockVector<Number> &v)
: BlockVectorBase<Vector<Number>>()
+ template <typename Number>
+ void
+ BlockVector<Number>::reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const MPI_Comm &comm_sm)
+ {
+ // update the number of blocks
+ this->block_indices.reinit(partitioners.size(), 0);
+
+ // initialize each block
+ this->components.resize(this->n_blocks());
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ this->components[i].reinit(partitioners[i], comm_sm);
+
+ // update block_indices content
+ this->collect_sizes();
+ }
+
+
+
template <typename Number>
BlockVector<Number> &
BlockVector<Number>::operator=(const value_type s)
/**
* Initialize the vector given to the parallel partitioning described in
* @p partitioner. The input argument is a shared pointer, which stores
- * the partitioner data only once and share it between several vectors
- * with the same layout.
+ * the partitioner data only once and can be shared between several
+ * vectors with the same layout.
*
* The optional argument @p comm_sm, which consists of processes on
- * the same shared-memory domain, allows users have read-only access to
+ * the same shared-memory domain, allows users to have read-only access to
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test the constructors of LinearAlgebra::distributed::BlockVector that
+// take IndexSet objects or a vector of Utilities::MPI::Partitioner objects.
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_block_vector.h>
+
+#include <memory>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process(comm);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+
+ constexpr unsigned int n_blocks = 3;
+ constexpr unsigned int n_indices_per_proc_and_block = 2;
+
+ const unsigned int n_indices_per_block =
+ n_indices_per_proc_and_block * n_procs;
+ const unsigned int n_indices = n_blocks * n_indices_per_block;
+
+ // set up partitioning
+ std::vector<IndexSet> owned_indexsets;
+ std::vector<IndexSet> relevant_indexsets;
+ std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
+ for (unsigned int b = 0; b < n_blocks; ++b)
+ {
+ const unsigned int begin = b * n_indices_per_block;
+
+ IndexSet owned(n_indices);
+ owned.add_range(begin + n_indices_per_proc_and_block * myid,
+ begin + n_indices_per_proc_and_block * (myid + 1));
+
+ IndexSet relevant(n_indices);
+ relevant.add_range(begin + 1, begin + 2);
+
+ partitioners.push_back(
+ std::make_shared<const Utilities::MPI::Partitioner>(owned,
+ relevant,
+ comm));
+ owned_indexsets.push_back(std::move(owned));
+ relevant_indexsets.push_back(std::move(relevant));
+ }
+
+ // create block vectors using different constructors
+ {
+ LinearAlgebra::distributed::BlockVector<double> block_vector(
+ owned_indexsets, comm);
+ deallog << "w/o ghost indices: OK" << std::endl;
+ }
+
+ {
+ LinearAlgebra::distributed::BlockVector<double> block_vector(
+ owned_indexsets, relevant_indexsets, comm);
+ deallog << "w/ ghost indices: OK" << std::endl;
+ }
+
+ {
+ LinearAlgebra::distributed::BlockVector<double> block_vector(partitioners);
+ deallog << "partitioner: OK" << std::endl;
+ }
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ initlog();
+
+ test();
+}
--- /dev/null
+
+DEAL::w/o ghost indices: OK
+DEAL::w/ ghost indices: OK
+DEAL::partitioner: OK