https://gitweb.dealii.org/ - dealii.git/commitdiff
Choose to initialize BlockVector with ghost elements using reinit(partitioners). 15297/head
author    Marc Fehling <mafehling.git@gmail.com>
          Sat, 3 Jun 2023 03:35:00 +0000 (21:35 -0600)
committer Marc Fehling <mafehling.git@gmail.com>
          Sun, 4 Jun 2023 19:14:16 +0000 (13:14 -0600)
12 files changed:
doc/news/changes/minor/20230602Fehling [new file with mode: 0644]
include/deal.II/lac/la_parallel_block_vector.h
include/deal.II/lac/la_parallel_block_vector.templates.h
include/deal.II/lac/petsc_block_vector.h
include/deal.II/lac/trilinos_parallel_block_vector.h
source/lac/trilinos_block_vector.cc
tests/mpi/parallel_block_vector_reinit_01.cc [moved from tests/mpi/parallel_block_vector_14.cc with 96% similarity]
tests/mpi/parallel_block_vector_reinit_01.mpirun=10.output [moved from tests/mpi/parallel_block_vector_14.mpirun=10.output with 71% similarity]
tests/petsc/parallel_block_vector_reinit_01.cc [new file with mode: 0644]
tests/petsc/parallel_block_vector_reinit_01.mpirun=10.output [new file with mode: 0644]
tests/trilinos/parallel_block_vector_reinit_01.cc [new file with mode: 0644]
tests/trilinos/parallel_block_vector_reinit_01.mpirun=10.output [new file with mode: 0644]

diff --git a/doc/news/changes/minor/20230602Fehling b/doc/news/changes/minor/20230602Fehling
new file mode 100644
index 0000000..1b1fe6f
--- /dev/null
@@ -0,0 +1,6 @@
+New: All parallel BlockVector classes now have a reinit function that takes
+collections of Utilities::MPI::Partitioner objects as an argument. This
+affects TrilinosWrappers::MPI::BlockVector, PETScWrappers::MPI::BlockVector,
+and LinearAlgebra::distributed::BlockVector, all sharing a compatible interface.
+<br>
+(Marc Fehling, 2023/06/02)
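
Editor's note: as a usage illustration of the new interface, here is a minimal sketch (not part of the patch; the function name and arguments are hypothetical). One Utilities::MPI::Partitioner per block is collected and handed to reinit(), mirroring the tests added by this commit.

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/partitioner.h>
    #include <deal.II/lac/la_parallel_block_vector.h>

    #include <memory>
    #include <vector>

    using namespace dealii;

    void
    initialize_from_partitioners(const std::vector<IndexSet> &owned,
                                 const std::vector<IndexSet> &relevant,
                                 const MPI_Comm               comm)
    {
      // Build one partitioner per block from the owned/ghost index sets.
      std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
        partitioners;
      for (unsigned int b = 0; b < owned.size(); ++b)
        partitioners.push_back(
          std::make_shared<const Utilities::MPI::Partitioner>(owned[b],
                                                              relevant[b],
                                                              comm));

      // Each block picks up its locally owned and ghost indices from the
      // corresponding partitioner.
      LinearAlgebra::distributed::BlockVector<double> block_vector;
      block_vector.reinit(partitioners);
    }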
diff --git a/include/deal.II/lac/la_parallel_block_vector.h b/include/deal.II/lac/la_parallel_block_vector.h
index 321ea64071bb70fa9061da6aada4255e6a0bac60..08b44c824f4b3c14ee678fe693c3db784a3f77cc 100644
@@ -363,6 +363,19 @@ namespace LinearAlgebra
           &             partitioners,
         const MPI_Comm &comm_sm = MPI_COMM_SELF);
 
+      /**
+       * This function exists purely for reasons of compatibility with the
+       * PETScWrappers::MPI::BlockVector and TrilinosWrappers::MPI::BlockVector
+       * classes. It calls the function above and ignores the parameter
+       * @p make_ghosted.
+       */
+      void
+      reinit(
+        const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+          &             partitioners,
+        const bool      make_ghosted,
+        const MPI_Comm &comm_sm = MPI_COMM_SELF);
+
       /**
        * This function copies the data that has accumulated in the data buffer
        * for ghost indices to the owning processor. For the meaning of the
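
Editor's note: the overload above is what enables generic code over all three parallel block vector families; a minimal sketch (not part of the patch, function name hypothetical):

    #include <deal.II/base/partitioner.h>

    #include <memory>
    #include <vector>

    using namespace dealii;

    // Compiles for LinearAlgebra::distributed::BlockVector<Number>,
    // PETScWrappers::MPI::BlockVector, and TrilinosWrappers::MPI::BlockVector
    // alike; the distributed variant simply ignores the flag.
    template <typename BlockVectorType>
    void
    reinit_ghosted(
      BlockVectorType &v,
      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
        &partitioners)
    {
      v.reinit(partitioners, /*make_ghosted=*/true);
    }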
diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h
index da7ef55ac01f32f612f2f02eaad23cdc22fe8626..990c2950d3fc5db3d30d444dee0763549638ab72 100644
@@ -220,6 +220,19 @@ namespace LinearAlgebra
 
 
 
+    template <typename Number>
+    void
+    BlockVector<Number>::reinit(
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &partitioners,
+      const bool /*make_ghosted*/,
+      const MPI_Comm &comm_sm)
+    {
+      reinit(partitioners, comm_sm);
+    }
+
+
+
     template <typename Number>
     BlockVector<Number> &
     BlockVector<Number>::operator=(const value_type s)
diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h
index 94e4a1aa7655550234c17be778e9b5f1c896c161..2c7694da107c4ab740bef1638461c2076be3060a 100644
@@ -245,6 +245,19 @@ namespace PETScWrappers
              const std::vector<IndexSet> &ghost_entries,
              const MPI_Comm               communicator);
 
+      /**
+       * Initialize each block with the parallel partitioning described by the
+       * corresponding entry in @p partitioners.
+       *
+       * You can decide whether your vector will contain ghost elements with
+       * @p make_ghosted.
+       */
+      void
+      reinit(
+        const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+          &        partitioners,
+        const bool make_ghosted = true);
+
       /**
        * This function collects the sizes of the sub-objects and stores them
        * in internal arrays, in order to be able to relay global indices into
@@ -569,6 +582,26 @@ namespace PETScWrappers
 
 
 
+    inline void
+    BlockVector::reinit(
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &        partitioners,
+      const bool make_ghosted)
+    {
+      // update the number of blocks
+      this->block_indices.reinit(partitioners.size(), 0);
+
+      // initialize each block
+      this->components.resize(this->n_blocks());
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        this->components[i].reinit(partitioners[i], make_ghosted);
+
+      // update block_indices content
+      this->collect_sizes();
+    }
+
+
+
     inline MPI_Comm
     BlockVector::get_mpi_communicator() const
     {
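
Editor's note: on the last step of the implementation above, collect_sizes() rebuilds the BlockIndices object so global indices can again be mapped to blocks. A hedged sketch of what that mapping provides (not part of the patch; the helper is hypothetical):

    #include <deal.II/base/types.h>
    #include <deal.II/lac/petsc_block_vector.h>

    #include <utility>

    using namespace dealii;

    // Split a global index into (block number, index within that block).
    // get_block_indices() is inherited from BlockVectorBase and reflects the
    // sizes gathered by collect_sizes().
    std::pair<unsigned int, types::global_dof_index>
    locate(const PETScWrappers::MPI::BlockVector &v,
           const types::global_dof_index          global_index)
    {
      return v.get_block_indices().global_to_local(global_index);
    }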
diff --git a/include/deal.II/lac/trilinos_parallel_block_vector.h b/include/deal.II/lac/trilinos_parallel_block_vector.h
index 160792b7e6f77327b38e8f833e1788f3bc997d97..33c3ac309fd4651b3940ad78a1612ffd2a1884cc 100644
@@ -215,6 +215,22 @@ namespace TrilinosWrappers
              const MPI_Comm               communicator    = MPI_COMM_WORLD,
              const bool                   vector_writable = false);
 
+      /**
+       * Initialize each block with the parallel partitioning described by the
+       * corresponding entry in @p partitioners.
+       *
+       * You can decide whether your vector will contain ghost elements with
+       * @p make_ghosted.
+       *
+       * The parameter @p vector_writable only has effect on ghosted vectors
+       * and is ignored for non-ghosted vectors.
+       */
+      void
+      reinit(
+        const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+          &        partitioners,
+        const bool make_ghosted    = true,
+        const bool vector_writable = false);
 
       /**
        * Change the dimension to that of the vector <tt>V</tt>. The same
diff --git a/source/lac/trilinos_block_vector.cc b/source/lac/trilinos_block_vector.cc
index d72464c69c129fe923c3f562166b611f0348a05e..4ce2b52c91e18d3c280ede04dcbefa08b523313f 100644
@@ -113,6 +113,29 @@ namespace TrilinosWrappers
 
 
 
+    void
+    BlockVector::reinit(
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &        partitioners,
+      const bool make_ghosted,
+      const bool vector_writable)
+    {
+      // update the number of blocks
+      this->block_indices.reinit(partitioners.size(), 0);
+
+      // initialize each block
+      this->components.resize(this->n_blocks());
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        this->components[i].reinit(partitioners[i],
+                                   make_ghosted,
+                                   vector_writable);
+
+      // update block_indices content
+      this->collect_sizes();
+    }
+
+
+
     void
     BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
     {
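
Editor's note: to show where the Trilinos-specific vector_writable flag comes in, a hedged usage sketch (not part of the patch; the function is hypothetical). Per the documentation above, the flag only matters when the vector is ghosted.

    #include <deal.II/base/partitioner.h>
    #include <deal.II/lac/trilinos_parallel_block_vector.h>

    #include <memory>
    #include <vector>

    using namespace dealii;

    void
    make_writable_ghosted(
      TrilinosWrappers::MPI::BlockVector &bv,
      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
        &partitioners)
    {
      // With make_ghosted == false, vector_writable would be ignored.
      bv.reinit(partitioners,
                /*make_ghosted=*/true,
                /*vector_writable=*/true);
    }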
diff --git a/tests/mpi/parallel_block_vector_14.cc b/tests/mpi/parallel_block_vector_reinit_01.cc
similarity index 96%
rename from tests/mpi/parallel_block_vector_14.cc
rename to tests/mpi/parallel_block_vector_reinit_01.cc
index 071c0d44d9dc4675d78a0a72623020496c33f694..1fd91de2013f1c4e87badca6884c72343392065a 100644
@@ -1,6 +1,6 @@
 // ---------------------------------------------------------------------
 //
-// Copyright (C) 2022 by the deal.II authors
+// Copyright (C) 2022 - 2023 by the deal.II authors
 //
 // This file is part of the deal.II library.
 //
@@ -79,7 +79,7 @@ test()
 
   {
     LinearAlgebra::distributed::BlockVector<double> block_vector(partitioners);
-    deallog << "partitioner: OK" << std::endl;
+    deallog << "partitioners: OK" << std::endl;
   }
 }
 
diff --git a/tests/mpi/parallel_block_vector_14.mpirun=10.output b/tests/mpi/parallel_block_vector_reinit_01.mpirun=10.output
similarity index 71%
rename from tests/mpi/parallel_block_vector_14.mpirun=10.output
rename to tests/mpi/parallel_block_vector_reinit_01.mpirun=10.output
index 9d10939766d2a60a28e34a414aa61d722e785462..2edc592cfe4bc82a0dda92d2bcde596cce661fe4 100644
@@ -1,4 +1,4 @@
 
 DEAL::w/o ghost indices: OK
 DEAL::w/  ghost indices: OK
-DEAL::partitioner: OK
+DEAL::partitioners: OK
diff --git a/tests/petsc/parallel_block_vector_reinit_01.cc b/tests/petsc/parallel_block_vector_reinit_01.cc
new file mode 100644
index 0000000..de1885f
--- /dev/null
@@ -0,0 +1,101 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// test various reinit functions of PETScWrappers::MPI::BlockVector
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/petsc_block_vector.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+  const unsigned int myid    = Utilities::MPI::this_mpi_process(comm);
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+
+  constexpr unsigned int n_blocks                     = 3;
+  constexpr unsigned int n_indices_per_proc_and_block = 2;
+
+  const unsigned int n_indices_per_block =
+    n_indices_per_proc_and_block * n_procs;
+
+  // set up partitioning
+  std::vector<IndexSet> owned_indexsets;
+  std::vector<IndexSet> relevant_indexsets;
+  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      // For PETSc, the IndexSet for each block must be complete over all
+      // processes, i.e., their union has to start from zero and must cover all
+      // indices.
+
+      IndexSet owned(n_indices_per_block);
+      owned.add_range(n_indices_per_proc_and_block * myid,
+                      n_indices_per_proc_and_block * (myid + 1));
+
+      IndexSet relevant(n_indices_per_block);
+      relevant.add_range(1, 2);
+
+      partitioners.push_back(
+        std::make_shared<const Utilities::MPI::Partitioner>(owned,
+                                                            relevant,
+                                                            comm));
+      owned_indexsets.push_back(std::move(owned));
+      relevant_indexsets.push_back(std::move(relevant));
+    }
+
+  // initialize a block vector using different reinit functions
+  PETScWrappers::MPI::BlockVector block_vector;
+
+  block_vector.reinit(owned_indexsets, comm);
+  AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+  deallog << "w/o ghost indices: OK" << std::endl;
+
+  block_vector.reinit(owned_indexsets, relevant_indexsets, comm);
+  AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+  deallog << "w/  ghost indices: OK" << std::endl;
+
+  block_vector.reinit(partitioners, /*make_ghosted=*/false);
+  AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+  deallog << "partitioners w/o ghost indices: OK" << std::endl;
+
+  block_vector.reinit(partitioners, /*make_ghosted=*/true);
+  AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+  deallog << "partitioners w/  ghost indices: OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+  if (myid == 0)
+    initlog();
+
+  test();
+}
diff --git a/tests/petsc/parallel_block_vector_reinit_01.mpirun=10.output b/tests/petsc/parallel_block_vector_reinit_01.mpirun=10.output
new file mode 100644
index 0000000..0daae16
--- /dev/null
@@ -0,0 +1,5 @@
+
+DEAL::w/o ghost indices: OK
+DEAL::w/  ghost indices: OK
+DEAL::partitioners w/o ghost indices: OK
+DEAL::partitioners w/  ghost indices: OK
diff --git a/tests/trilinos/parallel_block_vector_reinit_01.cc b/tests/trilinos/parallel_block_vector_reinit_01.cc
new file mode 100644
index 0000000..9ccece0
--- /dev/null
@@ -0,0 +1,100 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// test various reinit functions of TrilinosWrappers::MPI::BlockVector
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+  const unsigned int myid    = Utilities::MPI::this_mpi_process(comm);
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+
+  constexpr unsigned int n_blocks                     = 3;
+  constexpr unsigned int n_indices_per_proc_and_block = 2;
+
+  const unsigned int n_indices_per_block =
+    n_indices_per_proc_and_block * n_procs;
+  const unsigned int n_indices = n_blocks * n_indices_per_block;
+
+  // set up partitioning
+  std::vector<IndexSet> owned_indexsets;
+  std::vector<IndexSet> relevant_indexsets;
+  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      const unsigned int begin = b * n_indices_per_block;
+
+      IndexSet owned(n_indices);
+      owned.add_range(begin + n_indices_per_proc_and_block * myid,
+                      begin + n_indices_per_proc_and_block * (myid + 1));
+
+      IndexSet relevant(n_indices);
+      relevant.add_range(begin + 1, begin + 2);
+
+      partitioners.push_back(
+        std::make_shared<const Utilities::MPI::Partitioner>(owned,
+                                                            relevant,
+                                                            comm));
+      owned_indexsets.push_back(std::move(owned));
+      relevant_indexsets.push_back(std::move(relevant));
+    }
+
+  // initialize a block vector using different reinit functions
+  TrilinosWrappers::MPI::BlockVector block_vector;
+
+  block_vector.reinit(owned_indexsets, comm);
+  AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+  deallog << "w/o ghost indices: OK" << std::endl;
+
+  block_vector.reinit(owned_indexsets, relevant_indexsets, comm);
+  AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+  deallog << "w/  ghost indices: OK" << std::endl;
+
+  block_vector.reinit(partitioners, /*make_ghosted=*/false);
+  AssertThrow(block_vector.has_ghost_elements() == false, ExcInternalError());
+  deallog << "partitioners w/o ghost indices: OK" << std::endl;
+
+  block_vector.reinit(partitioners, /*make_ghosted=*/true);
+  AssertThrow(block_vector.has_ghost_elements() == true, ExcInternalError());
+  deallog << "partitioners w/  ghost indices: OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+  if (myid == 0)
+    initlog();
+
+  test();
+}
diff --git a/tests/trilinos/parallel_block_vector_reinit_01.mpirun=10.output b/tests/trilinos/parallel_block_vector_reinit_01.mpirun=10.output
new file mode 100644
index 0000000..0daae16
--- /dev/null
@@ -0,0 +1,5 @@
+
+DEAL::w/o ghost indices: OK
+DEAL::w/  ghost indices: OK
+DEAL::partitioners w/o ghost indices: OK
+DEAL::partitioners w/  ghost indices: OK
