--- /dev/null
+New: All parallel BlockVector classes now have a reinit function that takes
+collections of Utilities::MPI::Partitioner objects as an argument. This
+affects TrilinosWrappers::MPI::BlockVector, PETScWrappers::MPI::BlockVector,
+and LinearAlgebra::distributed::BlockVector, all of which now share a
+compatible interface for this kind of initialization.
+<br>
+(Marc Fehling, 2023/06/02)
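As a quick illustration of the new, shared interface (not part of the patch itself), the sketch below builds one Utilities::MPI::Partitioner per block and hands the collection to reinit(). The function name example_usage and the index ranges are made up for this example; the same reinit call also compiles for TrilinosWrappers::MPI::BlockVector and PETScWrappers::MPI::BlockVector.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/partitioner.h>

#include <deal.II/lac/la_parallel_block_vector.h>

#include <memory>
#include <vector>

void
example_usage(const MPI_Comm comm)
{
  using namespace dealii;

  const unsigned int myid    = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

  // one partitioner per block: two locally owned indices per process,
  // plus index 0 as a ghost entry
  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
  for (unsigned int b = 0; b < 2; ++b)
    {
      IndexSet owned(2 * n_procs);
      owned.add_range(2 * myid, 2 * (myid + 1));

      IndexSet relevant(2 * n_procs);
      relevant.add_range(0, 1);

      partitioners.push_back(
        std::make_shared<const Utilities::MPI::Partitioner>(owned,
                                                            relevant,
                                                            comm));
    }

  // one block per partitioner
  LinearAlgebra::distributed::BlockVector<double> v;
  v.reinit(partitioners);
}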
& partitioners,
const MPI_Comm &comm_sm = MPI_COMM_SELF);
+ /**
+ * This function exists purely for reasons of compatibility with the
+ * PETScWrappers::MPI::BlockVector and TrilinosWrappers::MPI::BlockVector
+ * classes.
+ *
+ * It calls the function above, and ignores the parameter @p make_ghosted.
+ */
+ void
+ reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const bool make_ghosted,
+ const MPI_Comm &comm_sm = MPI_COMM_SELF);
+
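A brief sketch of how this compatibility overload is meant to be called; the name v is illustrative and partitioners stands for a previously assembled std::vector of shared pointers to Utilities::MPI::Partitioner objects:

  LinearAlgebra::distributed::BlockVector<double> v;

  // make_ghosted is accepted for interface compatibility but has no effect
  // here; the ghost layout comes from the partitioners themselves
  v.reinit(partitioners, /*make_ghosted=*/true);

  // equivalent call with an explicit shared-memory communicator
  v.reinit(partitioners, /*make_ghosted=*/true, MPI_COMM_SELF);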
/**
* This function copies the data that has accumulated in the data buffer
* for ghost indices to the owning processor. For the meaning of the
+ template <typename Number>
+ void
+ BlockVector<Number>::reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ &partitioners,
+ const bool /*make_ghosted*/,
+ const MPI_Comm &comm_sm)
+ {
+ reinit(partitioners, comm_sm);
+ }
+
+
+
template <typename Number>
BlockVector<Number> &
BlockVector<Number>::operator=(const value_type s)
const std::vector<IndexSet> &ghost_entries,
const MPI_Comm communicator);
+ /**
+ * Initialize each block with the parallel partitioning described by the
+ * corresponding entry of @p partitioners.
+ *
+ * The flag @p make_ghosted determines whether the vector will contain
+ * ghost elements.
+ */
+ void
+ reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const bool make_ghosted = true);
+
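A short usage sketch for this overload, assuming a partitioners collection has been assembled beforehand as in the test added further below:

  PETScWrappers::MPI::BlockVector v;

  // ghosted layout (the default), using the ghost indices stored in the
  // partitioners
  v.reinit(partitioners);

  // purely locally owned layout, ghost indices are ignored
  v.reinit(partitioners, /*make_ghosted=*/false);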
/**
* This function collects the sizes of the sub-objects and stores them
* in internal arrays, in order to be able to relay global indices into
+ inline void
+ BlockVector::reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const bool make_ghosted)
+ {
+ // update the number of blocks
+ this->block_indices.reinit(partitioners.size(), 0);
+
+ // initialize each block
+ this->components.resize(this->n_blocks());
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ this->components[i].reinit(partitioners[i], make_ghosted);
+
+ // update block_indices content
+ this->collect_sizes();
+ }
+
+
+
inline MPI_Comm
BlockVector::get_mpi_communicator() const
{
const MPI_Comm communicator = MPI_COMM_WORLD,
const bool vector_writable = false);
+ /**
+ * Initialize each block with the parallel partitioning described by the
+ * corresponding entry of @p partitioners.
+ *
+ * The flag @p make_ghosted determines whether the vector will contain
+ * ghost elements.
+ *
+ * The parameter @p vector_writable only has effect on ghosted vectors
+ * and is ignored for non-ghosted vectors.
+ */
+ void
+ reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const bool make_ghosted = true,
+ const bool vector_writable = false);
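A short usage sketch, again assuming an existing partitioners collection; vector_writable only matters in the ghosted case:

  TrilinosWrappers::MPI::BlockVector v;

  // ghosted layout with read-only ghost entries (the default)
  v.reinit(partitioners);

  // ghosted layout with writable ghost entries
  v.reinit(partitioners, /*make_ghosted=*/true, /*vector_writable=*/true);

  // non-ghosted layout; vector_writable has no effect here
  v.reinit(partitioners, /*make_ghosted=*/false);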
/**
* Change the dimension to that of the vector <tt>V</tt>. The same
+ void
+ BlockVector::reinit(
+ const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+ & partitioners,
+ const bool make_ghosted,
+ const bool vector_writable)
+ {
+ // update the number of blocks
+ this->block_indices.reinit(partitioners.size(), 0);
+
+ // initialize each block
+ this->components.resize(this->n_blocks());
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ this->components[i].reinit(partitioners[i],
+ make_ghosted,
+ vector_writable);
+
+ // update block_indices content
+ this->collect_sizes();
+ }
+
+
+
void
BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
{
// ---------------------------------------------------------------------
//
-// Copyright (C) 2022 by the deal.II authors
+// Copyright (C) 2022 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
{
LinearAlgebra::distributed::BlockVector<double> block_vector(partitioners);
- deallog << "partitioner: OK" << std::endl;
+ deallog << "partitioners: OK" << std::endl;
}
}
DEAL::w/o ghost indices: OK
DEAL::w/ ghost indices: OK
-DEAL::partitioner: OK
+DEAL::partitioners: OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// test the various reinit functions of PETScWrappers::MPI::BlockVector
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/petsc_block_vector.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process(comm);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+
+ constexpr unsigned int n_blocks = 3;
+ constexpr unsigned int n_indices_per_proc_and_block = 2;
+
+ const unsigned int n_indices_per_block =
+ n_indices_per_proc_and_block * n_procs;
+
+ // set up partitioning
+ std::vector<IndexSet> owned_indexsets;
+ std::vector<IndexSet> relevant_indexsets;
+ std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
+ for (unsigned int b = 0; b < n_blocks; ++b)
+ {
+ // For PETSc, the IndexSet for each block must be complete over all
+ // processes, i.e., their union has to start from zero and must cover all
+ // indices.
+
+ IndexSet owned(n_indices_per_block);
+ owned.add_range(n_indices_per_proc_and_block * myid,
+ n_indices_per_proc_and_block * (myid + 1));
+
+ IndexSet relevant(n_indices_per_block);
+ relevant.add_range(1, 2);
+
+ partitioners.push_back(
+ std::make_shared<const Utilities::MPI::Partitioner>(owned,
+ relevant,
+ comm));
+ owned_indexsets.push_back(std::move(owned));
+ relevant_indexsets.push_back(std::move(relevant));
+ }
+
+ // exercise the different reinit variants on a default-constructed block vector
+ PETScWrappers::MPI::BlockVector block_vector;
+
+ block_vector.reinit(owned_indexsets, comm);
+ AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+ deallog << "w/o ghost indices: OK" << std::endl;
+
+ block_vector.reinit(owned_indexsets, relevant_indexsets, comm);
+ AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+ deallog << "w/ ghost indices: OK" << std::endl;
+
+ block_vector.reinit(partitioners, /*make_ghosted=*/false);
+ AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+ deallog << "partitioners w/o ghost indices: OK" << std::endl;
+
+ block_vector.reinit(partitioners, /*make_ghosted=*/true);
+ AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+ deallog << "partitioners w/ ghost indices: OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ initlog();
+
+ test();
+}
--- /dev/null
+
+DEAL::w/o ghost indices: OK
+DEAL::w/ ghost indices: OK
+DEAL::partitioners w/o ghost indices: OK
+DEAL::partitioners w/ ghost indices: OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// test the various reinit functions of TrilinosWrappers::MPI::BlockVector
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process(comm);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+
+ constexpr unsigned int n_blocks = 3;
+ constexpr unsigned int n_indices_per_proc_and_block = 2;
+
+ const unsigned int n_indices_per_block =
+ n_indices_per_proc_and_block * n_procs;
+ const unsigned int n_indices = n_blocks * n_indices_per_block;
+
+ // set up partitioning
+ std::vector<IndexSet> owned_indexsets;
+ std::vector<IndexSet> relevant_indexsets;
+ std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
+ for (unsigned int b = 0; b < n_blocks; ++b)
+ {
+ const unsigned int begin = b * n_indices_per_block;
+
+ IndexSet owned(n_indices);
+ owned.add_range(begin + n_indices_per_proc_and_block * myid,
+ begin + n_indices_per_proc_and_block * (myid + 1));
+
+ IndexSet relevant(n_indices);
+ relevant.add_range(begin + 1, begin + 2);
+
+ partitioners.push_back(
+ std::make_shared<const Utilities::MPI::Partitioner>(owned,
+ relevant,
+ comm));
+ owned_indexsets.push_back(std::move(owned));
+ relevant_indexsets.push_back(std::move(relevant));
+ }
+
+ // exercise the different reinit variants on a default-constructed block vector
+ TrilinosWrappers::MPI::BlockVector block_vector;
+
+ block_vector.reinit(owned_indexsets, comm);
+ AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+ deallog << "w/o ghost indices: OK" << std::endl;
+
+ block_vector.reinit(owned_indexsets, relevant_indexsets, comm);
+ AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+ deallog << "w/ ghost indices: OK" << std::endl;
+
+ block_vector.reinit(partitioners, /*make_ghosted=*/false);
+ AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+ deallog << "partitioners w/o ghost indices: OK" << std::endl;
+
+ block_vector.reinit(partitioners, /*make_ghosted=*/true);
+ AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+ deallog << "partitioners w/ ghost indices: OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ initlog();
+
+ test();
+}
--- /dev/null
+
+DEAL::w/o ghost indices: OK
+DEAL::w/ ghost indices: OK
+DEAL::partitioners w/o ghost indices: OK
+DEAL::partitioners w/ ghost indices: OK