From 09de4d0e58254863a4f4d50bbb519aaaee8857ed Mon Sep 17 00:00:00 2001
From: ESeNonFossiIo <esenonfossiio@gmail.com>
Date: Mon, 18 Apr 2016 10:49:47 +0200
Subject: [PATCH] Fix ReinitHelper for PETSc and test vmult for
 PETScSparseMatrix

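Rename PETScWrappers::BlockSparseMatrix::locally_domain_sizes() and
locally_range_sizes() to locally_owned_domain_sizes() and
locally_owned_range_sizes(), and make the ReinitHelper for
PETScWrappers::BlockVector use the new names.

Build the locally owned domain and range index sets of
PETScWrappers::MPI::SparseMatrix from MatGetOwnershipRangeColumn() and
MatGetOwnershipRange() instead of MatGetOwnershipIS()/ISGetMinMax(),
and use MatGetLocalSize() to assert that the owned range is
contiguous, so that the domain index set follows the column ownership
and the range index set the row ownership of the matrix.

Extend the linear_operator_09 test to fill the wrapped matrices and to
actually perform a vmult() through the resulting LinearOperator, both
with the serial PETScWrappers::SparseMatrix and, on 1, 2 or 4 MPI
processes, with PETScWrappers::MPI::SparseMatrix; add the
corresponding mpirun=2 and mpirun=4 output files. A minimal sketch of
the pattern the parallel part of the test exercises (np is the number
of MPI processes; the test only runs this branch when np divides 4):

    // Sketch (mirrors tests/lac/linear_operator_09.cc): wrap an
    // assembled parallel PETSc matrix in a LinearOperator and let the
    // ReinitHelper size the domain/range vectors before a vmult.
    PETScWrappers::MPI::SparseMatrix a (MPI_COMM_WORLD, 4, 4, 4/np, 4/np, 1);
    // ... fill the diagonal, then a.compress (VectorOperation::add) ...
    auto op_a = linear_operator<PETScWrappers::MPI::Vector>(a);

    PETScWrappers::MPI::Vector u, v;
    op_a.reinit_domain_vector(u, /*omit_zeroing_entries=*/true); // matrix columns
    op_a.reinit_range_vector (v, /*omit_zeroing_entries=*/true); // matrix rows
    // ... set the locally owned entries of u and v ...
    op_a.vmult(v, u); // v = a * u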
---
 .../deal.II/lac/petsc_block_sparse_matrix.h   |  4 +-
 include/deal.II/lac/petsc_block_vector.h      |  4 +-
 source/lac/petsc_block_sparse_matrix.cc       |  4 +-
 source/lac/petsc_parallel_sparse_matrix.cc    | 32 +++++++---------
 tests/lac/linear_operator_09.cc               | 38 +++++++++++++++++--
 ...ator_09.with_cxx11=on.with_petsc=on.output |  4 ++
 ...ith_petsc=on.with_mpi=true.mpirun=2.output |  6 +++
 ...ith_petsc=on.with_mpi=true.mpirun=4.output |  6 +++
 8 files changed, 71 insertions(+), 27 deletions(-)
 create mode 100644 tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=2.output
 create mode 100644 tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=4.output

diff --git a/include/deal.II/lac/petsc_block_sparse_matrix.h b/include/deal.II/lac/petsc_block_sparse_matrix.h
index 41115858ca..073f0eac78 100644
--- a/include/deal.II/lac/petsc_block_sparse_matrix.h
+++ b/include/deal.II/lac/petsc_block_sparse_matrix.h
@@ -210,14 +210,14 @@ namespace PETScWrappers
      * Return the partitioning of the domain space of this matrix, i.e., the
      * partitioning of the vectors this matrix has to be multiplied with.
      */
-    std::vector< size_type > locally_domain_sizes() const;
+    std::vector< size_type > locally_owned_domain_sizes() const;
 
     /**
      * Return the partitioning of the range space of this matrix, i.e., the
      * partitioning of the vectors that are result from matrix-vector
      * products.
      */
-    std::vector< size_type > locally_range_sizes() const;
+    std::vector< size_type > locally_owned_range_sizes() const;
 
     /**
      * Make the clear() function in the base class visible, though it is
diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h
index e912a3367c..c95b456cd1 100644
--- a/include/deal.II/lac/petsc_block_vector.h
+++ b/include/deal.II/lac/petsc_block_vector.h
@@ -479,7 +479,7 @@ namespace internal
                                 PETScWrappers::BlockVector &v,
                                 bool omit_zeroing_entries)
       {
-        v.reinit(matrix.locally_range_sizes(), omit_zeroing_entries);
+        v.reinit(matrix.locally_owned_range_sizes(), omit_zeroing_entries);
       }
 
       template <typename Matrix>
@@ -488,7 +488,7 @@ namespace internal
                                 PETScWrappers::BlockVector &v,
                                 bool omit_zeroing_entries)
       {
-        v.reinit(matrix.locally_domain_sizes(), omit_zeroing_entries);
+        v.reinit(matrix.locally_owned_domain_sizes(), omit_zeroing_entries);
       }
     };
 
diff --git a/source/lac/petsc_block_sparse_matrix.cc b/source/lac/petsc_block_sparse_matrix.cc
index 23b5be69d9..75cecb3913 100644
--- a/source/lac/petsc_block_sparse_matrix.cc
+++ b/source/lac/petsc_block_sparse_matrix.cc
@@ -68,7 +68,7 @@ namespace PETScWrappers
 
   std::vector<BlockSparseMatrix::size_type >
   BlockSparseMatrix::
-  locally_domain_sizes() const
+  locally_owned_domain_sizes() const
   {
     std::vector< size_type > index_sets;
 
@@ -80,7 +80,7 @@ namespace PETScWrappers
 
   std::vector<BlockSparseMatrix::size_type >
   BlockSparseMatrix::
-  locally_range_sizes() const
+  locally_owned_range_sizes() const
   {
     std::vector< size_type > index_sets;
 
diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc
index c4d0c84eb5..9e57005665 100644
--- a/source/lac/petsc_parallel_sparse_matrix.cc
+++ b/source/lac/petsc_parallel_sparse_matrix.cc
@@ -896,25 +896,23 @@ namespace PETScWrappers
 #if DEAL_II_PETSC_VERSION_LT(3,3,0)
       Assert(false,ExcNotImplemented());
 #else
-      PetscInt n_rows, n_cols, min, max, size;
+      PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
       PetscErrorCode ierr;
-      IS rows, cols;
 
       ierr = MatGetSize (matrix, &n_rows, &n_cols);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
-      ierr = MatGetOwnershipIS(matrix, &rows, &cols);
+      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
-      ierr = ISGetMinMax(rows, &min, &max);
+      ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
-      IndexSet indices(n_rows);
-      indices.add_range(min, max);
+      Assert(n_loc_cols==max-min, ExcMessage("PETSc is requiring non-contiguous memory allocation."));
 
-      ierr = ISGetLocalSize(rows, &size);
-      AssertThrow (ierr == 0, ExcPETScError(ierr));
-      Assert(size==max-min+1, ExcMessage("PETSc is requiring non contiguous memory allocation."));
+      IndexSet indices(n_cols);
+      indices.add_range(min, max);
+      indices.compress();
 
       return indices;
 #endif
@@ -926,25 +924,23 @@ namespace PETScWrappers
 #if DEAL_II_PETSC_VERSION_LT(3,3,0)
       Assert(false,ExcNotImplemented());
 #else
-      PetscInt n_rows, n_cols, min, max, size;
+      PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
       PetscErrorCode ierr;
-      IS rows, cols;
 
       ierr = MatGetSize (matrix, &n_rows, &n_cols);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
-      ierr = MatGetOwnershipIS(matrix, &rows, &cols);
+      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
-      ierr = ISGetMinMax(cols, &min, &max);
+      ierr = MatGetOwnershipRange(matrix, &min, &max);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
-      IndexSet indices(n_cols);
-      indices.add_range(min, max);
+      Assert(n_loc_rows==max-min, ExcMessage("PETSc is requiring non-contiguous memory allocation."));
 
-      ierr = ISGetLocalSize(cols, &size);
-      AssertThrow (ierr == 0, ExcPETScError(ierr));
-      Assert(size==max-min+1, ExcMessage("PETSc is requiring non contiguous memory allocation."));
+      IndexSet indices(n_rows);
+      indices.add_range(min, max);
+      indices.compress();
 
       return indices;
 #endif
diff --git a/tests/lac/linear_operator_09.cc b/tests/lac/linear_operator_09.cc
index 43a3279f1b..e1d80fea52 100644
--- a/tests/lac/linear_operator_09.cc
+++ b/tests/lac/linear_operator_09.cc
@@ -41,29 +41,61 @@ using namespace dealii;
 
 int main(int argc, char *argv[])
 {
+  typedef PETScWrappers::MPI::SparseMatrix::size_type size_type;
+
   Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
 
   initlog();
   deallog << std::setprecision(10);
 
   {
-    PETScWrappers::SparseMatrix a;
+    PETScWrappers::SparseMatrix a(2,2,2);
+    for (unsigned int i = 0; i<2; ++i)
+      for (unsigned int j = 0; j<2; ++j)
+        a.add (i,j, 2);
+    a.compress (VectorOperation::add);
+
+    PETScWrappers::Vector v(2);
+    for (unsigned int i = 0; i<2; ++i)
+      v[i] = 1;
+    PETScWrappers::Vector u(v);
     auto op_a  = linear_operator<PETScWrappers::Vector>(a);
+    op_a.vmult(u,v);
+    deallog << "SparseMatrix -> OK" << std::endl;
   }
 
   {
-    PETScWrappers::MPI::SparseMatrix a;
-    auto op_a  = linear_operator<PETScWrappers::MPI::Vector>(a);
+    unsigned int np = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+    if (4%np==0 && np<=4)
+      {
+        PETScWrappers::MPI::SparseMatrix a (MPI_COMM_WORLD, 4, 4, 4/np, 4/np, 1);
+        for (unsigned int i = 0; i<4; ++i)
+          // add a single diagonal entry per row (one nonzero per row was preallocated)
+          a.add (i,i, 1);
+        a.compress (VectorOperation::add);
+        auto op_a  = linear_operator<PETScWrappers::MPI::Vector>(a);
+
+        PETScWrappers::MPI::Vector u,v;
+        op_a.reinit_domain_vector(u, true);
+        op_a.reinit_range_vector(v, true);
+        for (auto i : u.locally_owned_elements()) u[i]  = 1;
+        for (auto i : v.locally_owned_elements()) v[i]  = 1;
+
+        op_a.vmult(v,u);
+      }
+    deallog << "SparseMatrix MPI -> OK" << std::endl;
   }
 
   {
     PETScWrappers::BlockSparseMatrix a;
     auto op_a  = linear_operator<PETScWrappers::BlockVector>(a);
+    deallog << "BlockSparseMatrix -> OK" << std::endl;
   }
 
   {
     PETScWrappers::MPI::BlockSparseMatrix a;
     auto op_a  = linear_operator<PETScWrappers::MPI::BlockVector>(a);
+    deallog << "BlockSparseMatrix MPI -> OK" << std::endl;
   }
 
   deallog << "OK" << std::endl;
diff --git a/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.output b/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.output
index 0fd8fc12f0..441b883d69 100644
--- a/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.output
+++ b/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.output
@@ -1,2 +1,6 @@
 
+DEAL::SparseMatrix -> OK
+DEAL::SparseMatrix MPI -> OK
+DEAL::BlockSparseMatrix -> OK
+DEAL::BlockSparseMatrix MPI -> OK
 DEAL::OK
diff --git a/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=2.output b/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=2.output
new file mode 100644
index 0000000000..441b883d69
--- /dev/null
+++ b/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=2.output
@@ -0,0 +1,6 @@
+
+DEAL::SparseMatrix -> OK
+DEAL::SparseMatrix MPI -> OK
+DEAL::BlockSparseMatrix -> OK
+DEAL::BlockSparseMatrix MPI -> OK
+DEAL::OK
diff --git a/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=4.output b/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=4.output
new file mode 100644
index 0000000000..441b883d69
--- /dev/null
+++ b/tests/lac/linear_operator_09.with_cxx11=on.with_petsc=on.with_mpi=true.mpirun=4.output
@@ -0,0 +1,6 @@
+
+DEAL::SparseMatrix -> OK
+DEAL::SparseMatrix MPI -> OK
+DEAL::BlockSparseMatrix -> OK
+DEAL::BlockSparseMatrix MPI -> OK
+DEAL::OK
-- 
2.39.5