From 4623b50ea4a12177753986c0ae19cec38aaf2020 Mon Sep 17 00:00:00 2001 From: Stefano Zampini Date: Fri, 11 Nov 2022 18:53:36 +0100 Subject: [PATCH] PETScWrappers: improve BlockVector class remove the virtual method get_mpi_communicator, and always return the communicator of the PETSc object --- include/deal.II/lac/petsc_block_vector.h | 52 +++++++++++- include/deal.II/lac/petsc_full_matrix.h | 8 -- include/deal.II/lac/petsc_matrix_base.h | 11 +-- include/deal.II/lac/petsc_matrix_free.h | 23 +---- include/deal.II/lac/petsc_sparse_matrix.h | 41 ++------- include/deal.II/lac/petsc_vector.h | 30 ++----- include/deal.II/lac/petsc_vector_base.h | 17 +++- source/lac/petsc_full_matrix.cc | 7 -- source/lac/petsc_matrix_free.cc | 34 ++++---- .../lac/petsc_parallel_block_sparse_matrix.cc | 7 +- source/lac/petsc_parallel_block_vector.cc | 83 +++++++++++++++++++ source/lac/petsc_parallel_sparse_matrix.cc | 48 +++++------ source/lac/petsc_parallel_vector.cc | 65 ++++++++------- source/lac/petsc_sparse_matrix.cc | 12 --- source/lac/petsc_vector_base.cc | 15 ++++ tests/petsc/block_vector_iterator_03.cc | 7 ++ tests/petsc/block_vector_iterator_03.output | 41 +++++++++ tests/petsc/copy_to_dealvec_block.cc | 13 ++- 18 files changed, 323 insertions(+), 191 deletions(-) diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h index ebe458fbe1..299c13c6e5 100644 --- a/include/deal.II/lac/petsc_block_vector.h +++ b/include/deal.II/lac/petsc_block_vector.h @@ -130,12 +130,16 @@ namespace PETScWrappers const std::vector &ghost_indices, const MPI_Comm & communicator); + /** + * Create a BlockVector with a PETSc Vec + */ + explicit BlockVector(Vec v); /** * Destructor. Clears memory */ - ~BlockVector() override = default; + ~BlockVector() override; /** * Copy operator: fill all components of the vector that are locally @@ -150,6 +154,13 @@ namespace PETScWrappers BlockVector & operator=(const BlockVector &V); + /** + * This method assignes the PETSc Vec to the instance of the class. + * + */ + void + assign_petsc_vector(Vec v); + /** * Reinitialize the BlockVector to contain @p n_blocks of size @p * block_size, each of which stores @p locally_owned_size elements @@ -225,6 +236,16 @@ namespace PETScWrappers const std::vector &ghost_entries, const MPI_Comm & communicator); + /** + * This function collects the sizes of the sub-objects and stores them + * in internal arrays, in order to be able to relay global indices into + * the vector to indices into the subobjects. You *must* call this + * function each time after you have changed the size of the sub- + * objects. + */ + void + collect_sizes(); + /** * Change the number of blocks to num_blocks. The individual * blocks will get initialized with zero size, so it is assumed that the @@ -247,6 +268,14 @@ namespace PETScWrappers const MPI_Comm & get_mpi_communicator() const; + /** + * Return a reference to the underlying PETSc type. It can be used to + * modify the underlying data, so use it only when you know what you + * are doing. + */ + Vec & + petsc_vector(); + /** * Swap the contents of this vector and the other vector v. 
One * could do this operation with a temporary variable and copying over @@ -284,6 +313,12 @@ namespace PETScWrappers * Exception */ DeclException0(ExcNonMatchingBlockVectors); + + private: + void + setup_nest_vec(); + + Vec petsc_nest_vector = nullptr; }; /** @} */ @@ -336,6 +371,12 @@ namespace PETScWrappers reinit(parallel_partitioning, ghost_indices, communicator); } + inline BlockVector::BlockVector(Vec v) + : BlockVectorBase() + { + this->assign_petsc_vector(v); + } + inline BlockVector & BlockVector::operator=(const value_type s) { @@ -454,7 +495,12 @@ namespace PETScWrappers inline const MPI_Comm & BlockVector::get_mpi_communicator() const { - return block(0).get_mpi_communicator(); + static MPI_Comm comm = PETSC_COMM_SELF; + MPI_Comm pcomm = + PetscObjectComm(reinterpret_cast(petsc_nest_vector)); + if (pcomm != MPI_COMM_NULL) + comm = pcomm; + return comm; } inline bool @@ -473,6 +519,7 @@ namespace PETScWrappers BlockVector::swap(BlockVector &v) { std::swap(this->components, v.components); + std::swap(this->petsc_nest_vector, v.petsc_nest_vector); ::dealii::swap(this->block_indices, v.block_indices); } @@ -509,7 +556,6 @@ namespace PETScWrappers { u.swap(v); } - } // namespace MPI } // namespace PETScWrappers diff --git a/include/deal.II/lac/petsc_full_matrix.h b/include/deal.II/lac/petsc_full_matrix.h index 739dc50713..ccc8cd5c15 100644 --- a/include/deal.II/lac/petsc_full_matrix.h +++ b/include/deal.II/lac/petsc_full_matrix.h @@ -76,14 +76,6 @@ namespace PETScWrappers reinit(const size_type m, const size_type n); - /** - * Return a reference to the MPI communicator object in use with this - * matrix. Since this is a sequential matrix, it returns the MPI_COMM_SELF - * communicator. - */ - virtual const MPI_Comm & - get_mpi_communicator() const override; - private: /** * Do the actual work for the respective reinit() function and the diff --git a/include/deal.II/lac/petsc_matrix_base.h b/include/deal.II/lac/petsc_matrix_base.h index 4c1eb9c204..4c1b3e792b 100644 --- a/include/deal.II/lac/petsc_matrix_base.h +++ b/include/deal.II/lac/petsc_matrix_base.h @@ -671,10 +671,9 @@ namespace PETScWrappers /** * Return a reference to the MPI communicator object in use with this - * matrix. If not implemented, it returns the communicator used by the - * PETSc Mat. + * matrix. */ - virtual const MPI_Comm & + const MPI_Comm & get_mpi_communicator() const; /** @@ -1642,8 +1641,10 @@ namespace PETScWrappers inline const MPI_Comm & MatrixBase::get_mpi_communicator() const { - static MPI_Comm comm; - PetscObjectGetComm(reinterpret_cast(matrix), &comm); + static MPI_Comm comm = PETSC_COMM_SELF; + MPI_Comm pcomm = PetscObjectComm(reinterpret_cast(matrix)); + if (pcomm != MPI_COMM_NULL) + comm = pcomm; return comm; } diff --git a/include/deal.II/lac/petsc_matrix_free.h b/include/deal.II/lac/petsc_matrix_free.h index d9966ed185..3b5b723f10 100644 --- a/include/deal.II/lac/petsc_matrix_free.h +++ b/include/deal.II/lac/petsc_matrix_free.h @@ -176,13 +176,6 @@ namespace PETScWrappers void clear(); - /** - * Return a reference to the MPI communicator object in use with this - * matrix. - */ - const MPI_Comm & - get_mpi_communicator() const override; - /** * Matrix-vector multiplication: let dst = M*src with M * being this matrix. @@ -253,12 +246,6 @@ namespace PETScWrappers vmult(Vec &dst, const Vec &src) const; private: - /** - * Copy of the communicator object to be used for this parallel matrix- - * free object. 
- */ - MPI_Comm communicator; - /** * Callback-function registered as the matrix-vector multiplication of * this matrix-free object called by PETSc routines. This function must be @@ -279,7 +266,8 @@ namespace PETScWrappers * previous matrix is left to the caller. */ void - do_reinit(const unsigned int m, + do_reinit(const MPI_Comm & comm, + const unsigned int m, const unsigned int n, const unsigned int local_rows, const unsigned int local_columns); @@ -287,13 +275,6 @@ namespace PETScWrappers - // -------- template and inline functions ---------- - - inline const MPI_Comm & - MatrixFree::get_mpi_communicator() const - { - return communicator; - } } // namespace PETScWrappers DEAL_II_NAMESPACE_CLOSE diff --git a/include/deal.II/lac/petsc_sparse_matrix.h b/include/deal.II/lac/petsc_sparse_matrix.h index 08d7734571..f4022536fe 100644 --- a/include/deal.II/lac/petsc_sparse_matrix.h +++ b/include/deal.II/lac/petsc_sparse_matrix.h @@ -211,14 +211,6 @@ namespace PETScWrappers reinit(const SparsityPatternType &sparsity_pattern, const bool preset_nonzero_locations = true); - /** - * Return a reference to the MPI communicator object in use with this - * matrix. Since this is a sequential matrix, it returns the MPI_COMM_SELF - * communicator. - */ - virtual const MPI_Comm & - get_mpi_communicator() const override; - /** * Return the number of rows of this matrix. */ @@ -257,8 +249,7 @@ namespace PETScWrappers private: /** * Do the actual work for the respective reinit() function and the - * matching constructor, i.e. create a matrix. Getting rid of the previous - * matrix is left to the caller. + * matching constructor, i.e. create a matrix. */ void do_reinit(const size_type m, @@ -519,13 +510,6 @@ namespace PETScWrappers const SparsityPatternType &sparsity_pattern, const MPI_Comm & communicator); - /** - * Return a reference to the MPI communicator object in use with this - * matrix. - */ - virtual const MPI_Comm & - get_mpi_communicator() const override; - /** * @addtogroup Exceptions * @{ @@ -609,17 +593,13 @@ namespace PETScWrappers const MPI::Vector & V = MPI::Vector()) const; private: - /** - * Copy of the communicator object to be used for this parallel vector. - */ - MPI_Comm communicator; - /** * Same as previous functions. 
*/ template void - do_reinit(const SparsityPatternType & sparsity_pattern, + do_reinit(const MPI_Comm & comm, + const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, const unsigned int this_process, @@ -630,7 +610,8 @@ namespace PETScWrappers */ template void - do_reinit(const IndexSet & local_rows, + do_reinit(const MPI_Comm & comm, + const IndexSet & local_rows, const IndexSet & local_columns, const SparsityPatternType &sparsity_pattern); @@ -640,7 +621,8 @@ namespace PETScWrappers */ template void - do_reinit(const IndexSet & local_rows, + do_reinit(const MPI_Comm & comm, + const IndexSet & local_rows, const IndexSet & local_active_rows, const IndexSet & local_columns, const IndexSet & local_active_columns, @@ -650,15 +632,6 @@ namespace PETScWrappers friend class BlockMatrixBase; }; - - - // -------- template and inline functions ---------- - - inline const MPI_Comm & - SparseMatrix::get_mpi_communicator() const - { - return communicator; - } } // namespace MPI } // namespace PETScWrappers diff --git a/include/deal.II/lac/petsc_vector.h b/include/deal.II/lac/petsc_vector.h index 520226ba74..fbfa3d0ba5 100644 --- a/include/deal.II/lac/petsc_vector.h +++ b/include/deal.II/lac/petsc_vector.h @@ -353,13 +353,6 @@ namespace PETScWrappers reinit( const std::shared_ptr &partitioner); - /** - * Return a reference to the MPI communicator object in use with this - * vector. - */ - const MPI_Comm & - get_mpi_communicator() const override; - /** * Print to a stream. @p precision denotes the desired precision with * which values shall be printed, @p scientific whether scientific @@ -394,7 +387,9 @@ namespace PETScWrappers * locally. */ virtual void - create_vector(const size_type n, const size_type locally_owned_size); + create_vector(const MPI_Comm &comm, + const size_type n, + const size_type locally_owned_size); @@ -404,16 +399,10 @@ namespace PETScWrappers * you need to call update_ghost_values() before accessing those. */ virtual void - create_vector(const size_type n, + create_vector(const MPI_Comm &comm, + const size_type n, const size_type locally_owned_size, const IndexSet &ghostnodes); - - - private: - /** - * Copy of the communicator object to be used for this parallel vector. - */ - MPI_Comm communicator; }; @@ -440,9 +429,8 @@ namespace PETScWrappers Vector::Vector(const MPI_Comm & communicator, const dealii::Vector &v, const size_type locally_owned_size) - : communicator(communicator) { - Vector::create_vector(v.size(), locally_owned_size); + Vector::create_vector(communicator, v.size(), locally_owned_size); *this = v; } @@ -501,12 +489,6 @@ namespace PETScWrappers - inline const MPI_Comm & - Vector::get_mpi_communicator() const - { - return communicator; - } - # endif // DOXYGEN } // namespace MPI } // namespace PETScWrappers diff --git a/include/deal.II/lac/petsc_vector_base.h b/include/deal.II/lac/petsc_vector_base.h index 5e84c1e42e..c97d89e7fc 100644 --- a/include/deal.II/lac/petsc_vector_base.h +++ b/include/deal.II/lac/petsc_vector_base.h @@ -327,6 +327,15 @@ namespace PETScWrappers VectorBase & operator=(const PetscScalar s); + /** + * This method assignes the PETSc Vec to the instance of the class. + * This is particularly useful when performing PETSc to Deal.II operations + * since it allows to reuse the Deal.II VectorBase and the PETSc Vec + * without incurring in memory copies. + */ + void + assign_petsc_vector(Vec v); + /** * Test for equality. 
This function assumes that the present vector and * the one to compare with have the same size already, since comparing @@ -795,7 +804,7 @@ namespace PETScWrappers * Return a reference to the MPI communicator object in use with this * object. */ - virtual const MPI_Comm & + const MPI_Comm & get_mpi_communicator() const; protected: @@ -1148,8 +1157,10 @@ namespace PETScWrappers inline const MPI_Comm & VectorBase::get_mpi_communicator() const { - static MPI_Comm comm; - PetscObjectGetComm(reinterpret_cast(vector), &comm); + static MPI_Comm comm = PETSC_COMM_SELF; + MPI_Comm pcomm = PetscObjectComm(reinterpret_cast(vector)); + if (pcomm != MPI_COMM_NULL) + comm = pcomm; return comm; } diff --git a/source/lac/petsc_full_matrix.cc b/source/lac/petsc_full_matrix.cc index c102774a80..f090aaa150 100644 --- a/source/lac/petsc_full_matrix.cc +++ b/source/lac/petsc_full_matrix.cc @@ -56,13 +56,6 @@ namespace PETScWrappers AssertThrow(ierr == 0, ExcPETScError(ierr)); } - - const MPI_Comm & - FullMatrix::get_mpi_communicator() const - { - static const MPI_Comm communicator = MPI_COMM_SELF; - return communicator; - } } // namespace PETScWrappers diff --git a/source/lac/petsc_matrix_free.cc b/source/lac/petsc_matrix_free.cc index 751d64d464..83e3a61432 100644 --- a/source/lac/petsc_matrix_free.cc +++ b/source/lac/petsc_matrix_free.cc @@ -26,10 +26,9 @@ DEAL_II_NAMESPACE_OPEN namespace PETScWrappers { MatrixFree::MatrixFree() - : communicator(PETSC_COMM_SELF) { const int m = 0; - do_reinit(m, m, m, m); + do_reinit(MPI_COMM_SELF, m, m, m, m); } @@ -39,9 +38,8 @@ namespace PETScWrappers const unsigned int n, const unsigned int local_rows, const unsigned int local_columns) - : communicator(communicator) { - do_reinit(m, n, local_rows, local_columns); + do_reinit(communicator, m, n, local_rows, local_columns); } @@ -53,14 +51,14 @@ namespace PETScWrappers const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, const unsigned int this_process) - : communicator(communicator) { Assert(local_rows_per_process.size() == local_columns_per_process.size(), ExcDimensionMismatch(local_rows_per_process.size(), local_columns_per_process.size())); Assert(this_process < local_rows_per_process.size(), ExcInternalError()); - do_reinit(m, + do_reinit(communicator, + m, n, local_rows_per_process[this_process], local_columns_per_process[this_process]); @@ -72,9 +70,8 @@ namespace PETScWrappers const unsigned int n, const unsigned int local_rows, const unsigned int local_columns) - : communicator(MPI_COMM_WORLD) { - do_reinit(m, n, local_rows, local_columns); + do_reinit(MPI_COMM_WORLD, m, n, local_rows, local_columns); } @@ -85,14 +82,14 @@ namespace PETScWrappers const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, const unsigned int this_process) - : communicator(MPI_COMM_WORLD) { Assert(local_rows_per_process.size() == local_columns_per_process.size(), ExcDimensionMismatch(local_rows_per_process.size(), local_columns_per_process.size())); Assert(this_process < local_rows_per_process.size(), ExcInternalError()); - do_reinit(m, + do_reinit(MPI_COMM_WORLD, + m, n, local_rows_per_process[this_process], local_columns_per_process[this_process]); @@ -107,13 +104,11 @@ namespace PETScWrappers const unsigned int local_rows, const unsigned int local_columns) { - this->communicator = communicator; - // destroy the matrix and generate a new one const PetscErrorCode ierr = destroy_matrix(matrix); AssertThrow(ierr == 0, ExcPETScError(ierr)); - do_reinit(m, n, local_rows, 
local_columns); + do_reinit(communicator, m, n, local_rows, local_columns); } @@ -131,11 +126,11 @@ namespace PETScWrappers local_columns_per_process.size())); Assert(this_process < local_rows_per_process.size(), ExcInternalError()); - this->communicator = communicator; const PetscErrorCode ierr = destroy_matrix(matrix); AssertThrow(ierr != 0, ExcPETScError(ierr)); - do_reinit(m, + do_reinit(communicator, + m, n, local_rows_per_process[this_process], local_columns_per_process[this_process]); @@ -149,7 +144,7 @@ namespace PETScWrappers const unsigned int local_rows, const unsigned int local_columns) { - reinit(this->communicator, m, n, local_rows, local_columns); + reinit(this->get_mpi_communicator(), m, n, local_rows, local_columns); } @@ -161,7 +156,7 @@ namespace PETScWrappers const std::vector &local_columns_per_process, const unsigned int this_process) { - reinit(this->communicator, + reinit(this->get_mpi_communicator(), m, n, local_rows_per_process, @@ -178,7 +173,7 @@ namespace PETScWrappers AssertThrow(ierr == 0, ExcPETScError(ierr)); const int m = 0; - do_reinit(m, m, m, m); + do_reinit(MPI_COMM_SELF, m, m, m, m); } @@ -216,7 +211,8 @@ namespace PETScWrappers void - MatrixFree::do_reinit(const unsigned int m, + MatrixFree::do_reinit(const MPI_Comm & communicator, + const unsigned int m, const unsigned int n, const unsigned int local_rows, const unsigned int local_columns) diff --git a/source/lac/petsc_parallel_block_sparse_matrix.cc b/source/lac/petsc_parallel_block_sparse_matrix.cc index 5a8567d954..2f2745fb05 100644 --- a/source/lac/petsc_parallel_block_sparse_matrix.cc +++ b/source/lac/petsc_parallel_block_sparse_matrix.cc @@ -188,7 +188,12 @@ namespace PETScWrappers const MPI_Comm & BlockSparseMatrix::get_mpi_communicator() const { - return block(0, 0).get_mpi_communicator(); + static MPI_Comm comm = PETSC_COMM_SELF; + MPI_Comm pcomm = + PetscObjectComm(reinterpret_cast(petsc_nest_matrix)); + if (pcomm != MPI_COMM_NULL) + comm = pcomm; + return comm; } BlockSparseMatrix::operator const Mat &() const diff --git a/source/lac/petsc_parallel_block_vector.cc b/source/lac/petsc_parallel_block_vector.cc index 46082c9ae9..c00e9dbdcd 100644 --- a/source/lac/petsc_parallel_block_vector.cc +++ b/source/lac/petsc_parallel_block_vector.cc @@ -25,6 +25,12 @@ namespace PETScWrappers { using size_type = types::global_dof_index; + BlockVector::~BlockVector() + { + PetscErrorCode ierr = VecDestroy(&petsc_nest_vector); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + } + void BlockVector::reinit(const unsigned int num_blocks) { @@ -38,6 +44,83 @@ namespace PETScWrappers collect_sizes(); } + + void + BlockVector::assign_petsc_vector(Vec v) + { + PetscBool isnest; + + PetscErrorCode ierr = + PetscObjectTypeCompare(reinterpret_cast(v), + VECNEST, + &isnest); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + std::vector sv; + if (isnest) + { + PetscInt nb; + ierr = VecNestGetSize(v, &nb); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + for (PetscInt i = 0; i < nb; ++i) + { + Vec vv; + ierr = VecNestGetSubVec(v, i, &vv); + sv.push_back(vv); + } + } + else + { + sv.push_back(v); + } + + auto nb = sv.size(); + + std::vector block_sizes(nb, 0); + this->block_indices.reinit(block_sizes); + + this->components.resize(nb); + for (unsigned int i = 0; i < nb; ++i) + { + this->components[i].assign_petsc_vector(sv[i]); + } + + this->collect_sizes(); + } + + Vec & + BlockVector::petsc_vector() + { + return petsc_nest_vector; + } + + void + BlockVector::collect_sizes() + { + BlockVectorBase::collect_sizes(); + 
setup_nest_vec(); + } + + void + BlockVector::setup_nest_vec() + { + PetscErrorCode ierr = VecDestroy(&petsc_nest_vector); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + + auto n = this->n_blocks(); + + std::vector pcomponents(n); + for (unsigned int i = 0; i < n; i++) + pcomponents[i] = this->components[i].petsc_vector(); + + MPI_Comm comm = + pcomponents.size() > 0 ? + PetscObjectComm(reinterpret_cast(pcomponents[0])) : + PETSC_COMM_SELF; + + ierr = + VecCreateNest(comm, n, NULL, pcomponents.data(), &petsc_nest_vector); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + } } // namespace MPI } // namespace PETScWrappers diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc index 1d1e04efee..3e2f121a5e 100644 --- a/source/lac/petsc_parallel_sparse_matrix.cc +++ b/source/lac/petsc_parallel_sparse_matrix.cc @@ -32,7 +32,6 @@ namespace PETScWrappers namespace MPI { SparseMatrix::SparseMatrix() - : communicator(MPI_COMM_SELF) { // just like for vectors: since we // create an empty matrix, we can as @@ -59,9 +58,9 @@ namespace PETScWrappers const std::vector &local_columns_per_process, const unsigned int this_process, const bool preset_nonzero_locations) - : communicator(communicator) { - do_reinit(sparsity_pattern, + do_reinit(communicator, + sparsity_pattern, local_rows_per_process, local_columns_per_process, this_process, @@ -76,8 +75,6 @@ namespace PETScWrappers if (&other == this) return; - this->communicator = other.communicator; - PetscErrorCode ierr = destroy_matrix(matrix); AssertThrow(ierr == 0, ExcPETScError(ierr)); @@ -94,13 +91,12 @@ namespace PETScWrappers const SparsityPatternType &sparsity_pattern, const MPI_Comm & communicator) { - this->communicator = communicator; - // get rid of old matrix and generate a new one const PetscErrorCode ierr = destroy_matrix(matrix); AssertThrow(ierr == 0, ExcPETScError(ierr)); - do_reinit(local_rows, + do_reinit(communicator, + local_rows, local_active_rows, local_columns, local_active_columns, @@ -121,8 +117,6 @@ namespace PETScWrappers if (&other == this) return; - this->communicator = other.communicator; - const PetscErrorCode ierr = MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN); AssertThrow(ierr == 0, ExcPETScError(ierr)); @@ -140,14 +134,13 @@ namespace PETScWrappers const unsigned int this_process, const bool preset_nonzero_locations) { - this->communicator = communicator; - // get rid of old matrix and generate a new one const PetscErrorCode ierr = destroy_matrix(matrix); AssertThrow(ierr == 0, ExcPETScError(ierr)); - do_reinit(sparsity_pattern, + do_reinit(communicator, + sparsity_pattern, local_rows_per_process, local_columns_per_process, this_process, @@ -163,20 +156,19 @@ namespace PETScWrappers const SparsityPatternType &sparsity_pattern, const MPI_Comm & communicator) { - this->communicator = communicator; - // get rid of old matrix and generate a new one const PetscErrorCode ierr = destroy_matrix(matrix); AssertThrow(ierr == 0, ExcPETScError(ierr)); - do_reinit(local_rows, local_columns, sparsity_pattern); + do_reinit(communicator, local_rows, local_columns, sparsity_pattern); } template void - SparseMatrix::do_reinit(const IndexSet & local_rows, + SparseMatrix::do_reinit(const MPI_Comm & communicator, + const IndexSet & local_rows, const IndexSet & local_columns, const SparsityPatternType &sparsity_pattern) { @@ -334,6 +326,7 @@ namespace PETScWrappers template void SparseMatrix::do_reinit( + const MPI_Comm & communicator, const SparsityPatternType & sparsity_pattern, const 
std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -458,7 +451,8 @@ namespace PETScWrappers // BDDC template void - SparseMatrix::do_reinit(const IndexSet & local_rows, + SparseMatrix::do_reinit(const MPI_Comm & communicator, + const IndexSet & local_rows, const IndexSet & local_active_rows, const IndexSet & local_columns, const IndexSet & local_active_columns, @@ -724,25 +718,29 @@ namespace PETScWrappers const MPI_Comm &); template void - SparseMatrix::do_reinit(const SparsityPattern &, + SparseMatrix::do_reinit(const MPI_Comm &, + const SparsityPattern &, const std::vector &, const std::vector &, const unsigned int, const bool); template void - SparseMatrix::do_reinit(const DynamicSparsityPattern &, + SparseMatrix::do_reinit(const MPI_Comm &, + const DynamicSparsityPattern &, const std::vector &, const std::vector &, const unsigned int, const bool); template void - SparseMatrix::do_reinit(const IndexSet &, + SparseMatrix::do_reinit(const MPI_Comm &, + const IndexSet &, const IndexSet &, const SparsityPattern &); template void - SparseMatrix::do_reinit(const IndexSet &, + SparseMatrix::do_reinit(const MPI_Comm &, + const IndexSet &, const IndexSet &, const DynamicSparsityPattern &); @@ -762,13 +760,15 @@ namespace PETScWrappers const MPI_Comm &); template void - SparseMatrix::do_reinit(const IndexSet &, + SparseMatrix::do_reinit(const MPI_Comm &, + const IndexSet &, const IndexSet &, const IndexSet &, const IndexSet &, const SparsityPattern &); template void - SparseMatrix::do_reinit(const IndexSet &, + SparseMatrix::do_reinit(const MPI_Comm &, + const IndexSet &, const IndexSet &, const IndexSet &, const IndexSet &, diff --git a/source/lac/petsc_parallel_vector.cc b/source/lac/petsc_parallel_vector.cc index 61a5cdd68e..79a41b2dd8 100644 --- a/source/lac/petsc_parallel_vector.cc +++ b/source/lac/petsc_parallel_vector.cc @@ -29,12 +29,11 @@ namespace PETScWrappers namespace MPI { Vector::Vector() - : communicator(MPI_COMM_SELF) { // virtual functions called in constructors and destructors never use the // override in a derived class // for clarity be explicit on which function is called - Vector::create_vector(0, 0); + Vector::create_vector(MPI_COMM_SELF, 0, 0); } @@ -42,9 +41,8 @@ namespace PETScWrappers Vector::Vector(const MPI_Comm &communicator, const size_type n, const size_type locally_owned_size) - : communicator(communicator) { - Vector::create_vector(n, locally_owned_size); + Vector::create_vector(communicator, n, locally_owned_size); } @@ -52,7 +50,6 @@ namespace PETScWrappers Vector::Vector(const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator) - : communicator(communicator) { Assert(local.is_ascending_and_one_to_one(communicator), ExcNotImplemented()); @@ -60,21 +57,26 @@ namespace PETScWrappers IndexSet ghost_set = ghost; ghost_set.subtract_set(local); - Vector::create_vector(local.size(), local.n_elements(), ghost_set); + Vector::create_vector(communicator, + local.size(), + local.n_elements(), + ghost_set); } Vector::Vector(const Vector &v) : VectorBase() - , communicator(v.communicator) { if (v.has_ghost_elements()) - Vector::create_vector(v.size(), + Vector::create_vector(v.get_mpi_communicator(), + v.size(), v.locally_owned_size(), v.ghost_indices); else - Vector::create_vector(v.size(), v.locally_owned_size()); + Vector::create_vector(v.get_mpi_communicator(), + v.size(), + v.locally_owned_size()); this->operator=(v); } @@ -82,11 +84,10 @@ namespace PETScWrappers Vector::Vector(const IndexSet &local, const MPI_Comm 
&communicator) - : communicator(communicator) { Assert(local.is_ascending_and_one_to_one(communicator), ExcNotImplemented()); - Vector::create_vector(local.size(), local.n_elements()); + Vector::create_vector(communicator, local.size(), local.n_elements()); } @@ -108,9 +109,14 @@ namespace PETScWrappers if (size() != v.size()) { if (v.has_ghost_elements()) - reinit(v.locally_owned_elements(), v.ghost_indices, v.communicator); + reinit(v.locally_owned_elements(), + v.ghost_indices, + v.get_mpi_communicator()); else - reinit(v.communicator, v.size(), v.locally_owned_size(), true); + reinit(v.get_mpi_communicator(), + v.size(), + v.locally_owned_size(), + true); } PetscErrorCode ierr = VecCopy(v.vector, vector); @@ -133,19 +139,17 @@ namespace PETScWrappers { VectorBase::clear(); - create_vector(0, 0); + create_vector(MPI_COMM_SELF, 0, 0); } void - Vector::reinit(const MPI_Comm &comm, + Vector::reinit(const MPI_Comm &communicator, const size_type n, const size_type local_sz, const bool omit_zeroing_entries) { - communicator = comm; - // only do something if the sizes // mismatch (may not be true for every proc) @@ -170,7 +174,7 @@ namespace PETScWrappers const PetscErrorCode ierr = VecDestroy(&vector); AssertThrow(ierr == 0, ExcPETScError(ierr)); - create_vector(n, local_sz); + create_vector(communicator, n, local_sz); } // finally clear the new vector if so @@ -186,7 +190,9 @@ namespace PETScWrappers { if (v.has_ghost_elements()) { - reinit(v.locally_owned_elements(), v.ghost_indices, v.communicator); + reinit(v.locally_owned_elements(), + v.ghost_indices, + v.get_mpi_communicator()); if (!omit_zeroing_entries) { const PetscErrorCode ierr = VecSet(vector, 0.0); @@ -194,7 +200,7 @@ namespace PETScWrappers } } else - reinit(v.communicator, + reinit(v.get_mpi_communicator(), v.size(), v.locally_owned_size(), omit_zeroing_entries); @@ -210,14 +216,12 @@ namespace PETScWrappers const PetscErrorCode ierr = VecDestroy(&vector); AssertThrow(ierr == 0, ExcPETScError(ierr)); - communicator = comm; - Assert(local.is_ascending_and_one_to_one(comm), ExcNotImplemented()); IndexSet ghost_set = ghost; ghost_set.subtract_set(local); - create_vector(local.size(), local.n_elements(), ghost_set); + create_vector(comm, local.size(), local.n_elements(), ghost_set); } void @@ -226,11 +230,9 @@ namespace PETScWrappers const PetscErrorCode ierr = VecDestroy(&vector); AssertThrow(ierr == 0, ExcPETScError(ierr)); - communicator = comm; - Assert(local.is_ascending_and_one_to_one(comm), ExcNotImplemented()); Assert(local.size() > 0, ExcMessage("can not create vector of size 0.")); - create_vector(local.size(), local.n_elements()); + create_vector(comm, local.size(), local.n_elements()); } void @@ -244,7 +246,9 @@ namespace PETScWrappers void - Vector::create_vector(const size_type n, const size_type locally_owned_size) + Vector::create_vector(const MPI_Comm &communicator, + const size_type n, + const size_type locally_owned_size) { (void)n; AssertIndexRange(locally_owned_size, n + 1); @@ -262,7 +266,8 @@ namespace PETScWrappers void - Vector::create_vector(const size_type n, + Vector::create_vector(const MPI_Comm &communicator, + const size_type n, const size_type locally_owned_size, const IndexSet &ghostnodes) { @@ -327,7 +332,8 @@ namespace PETScWrappers # ifdef DEAL_II_WITH_MPI // in parallel, check that the vector // is zero on _all_ processors. 
- unsigned int num_nonzero = Utilities::MPI::sum(has_nonzero, communicator); + unsigned int num_nonzero = + Utilities::MPI::sum(has_nonzero, this->get_mpi_communicator()); return num_nonzero == 0; # else return has_nonzero == 0; @@ -372,6 +378,7 @@ namespace PETScWrappers // which is clearly slow, but nobody is going to print a whole // matrix this way on a regular basis for production runs, so // the slowness of the barrier doesn't matter + MPI_Comm communicator = this->get_mpi_communicator(); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(communicator); i++) diff --git a/source/lac/petsc_sparse_matrix.cc b/source/lac/petsc_sparse_matrix.cc index 194549925b..fd3d525056 100644 --- a/source/lac/petsc_sparse_matrix.cc +++ b/source/lac/petsc_sparse_matrix.cc @@ -122,18 +122,6 @@ namespace PETScWrappers - const MPI_Comm & - SparseMatrix::get_mpi_communicator() const - { - static MPI_Comm comm; - const PetscErrorCode ierr = - PetscObjectGetComm(reinterpret_cast(matrix), &comm); - AssertThrow(ierr == 0, ExcPETScError(ierr)); - return comm; - } - - - void SparseMatrix::do_reinit(const size_type m, const size_type n, diff --git a/source/lac/petsc_vector_base.cc b/source/lac/petsc_vector_base.cc index 671a198973..17c50e09b6 100644 --- a/source/lac/petsc_vector_base.cc +++ b/source/lac/petsc_vector_base.cc @@ -155,6 +155,7 @@ namespace PETScWrappers , ghosted(false) , last_action(::dealii::VectorOperation::unknown) { + /* TODO GHOSTED */ Assert(MultithreadInfo::is_running_single_threaded(), ExcMessage("PETSc does not support multi-threaded access, set " "the thread limit to 1 in MPI_InitFinalize().")); @@ -176,6 +177,20 @@ namespace PETScWrappers + void + VectorBase::assign_petsc_vector(Vec v) + { + /* TODO GHOSTED */ + AssertThrow(last_action == ::dealii::VectorOperation::unknown, + ExcMessage("Cannot assign a new Vec")); + PetscErrorCode ierr = + PetscObjectReference(reinterpret_cast(v)); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + ierr = VecDestroy(&vector); + AssertThrow(ierr == 0, ExcPETScError(ierr)); + vector = v; + } + void VectorBase::clear() { diff --git a/tests/petsc/block_vector_iterator_03.cc b/tests/petsc/block_vector_iterator_03.cc index 50a8944368..2f8a9a5691 100644 --- a/tests/petsc/block_vector_iterator_03.cc +++ b/tests/petsc/block_vector_iterator_03.cc @@ -70,6 +70,13 @@ test() // check that the two vectors are equal deallog << "Check 1: " << (v1 == v2 ? 
"true" : "false") << std::endl; + + // print vectors + v1.print(deallog.get_file_stream(), 10, true, false); + + // Extract the PETSc VECNEST and use print from PETScWrappers::VectorBase + PETScWrappers::VectorBase v1b(v1.petsc_vector()); + v1b.print(deallog.get_file_stream(), 10, true, false); }; // Check 2: loop forward and back diff --git a/tests/petsc/block_vector_iterator_03.output b/tests/petsc/block_vector_iterator_03.output index f19f759048..9a8fe105dc 100644 --- a/tests/petsc/block_vector_iterator_03.output +++ b/tests/petsc/block_vector_iterator_03.output @@ -1,5 +1,46 @@ DEAL::Check 1: true +Component 0 +[Proc 0 0-1] +0.0000000000e+00 +1.0000000000e+00 + +Component 1 +[Proc 0 0-3] +2.0000000000e+00 +3.0000000000e+00 +4.0000000000e+00 +5.0000000000e+00 + +Component 2 +[Proc 0 0-2] +6.0000000000e+00 +7.0000000000e+00 +8.0000000000e+00 + +Component 3 +[Proc 0 0-4] +9.0000000000e+00 +1.0000000000e+01 +1.1000000000e+01 +1.2000000000e+01 +1.3000000000e+01 + +0.0000000000e+00 +1.0000000000e+00 +2.0000000000e+00 +3.0000000000e+00 +4.0000000000e+00 +5.0000000000e+00 +6.0000000000e+00 +7.0000000000e+00 +8.0000000000e+00 +9.0000000000e+00 +1.0000000000e+01 +1.1000000000e+01 +1.2000000000e+01 +1.3000000000e+01 + DEAL::Check 2: true DEAL::Check 3: true DEAL::Check 4: true diff --git a/tests/petsc/copy_to_dealvec_block.cc b/tests/petsc/copy_to_dealvec_block.cc index ef4acbfeb3..cb6132c559 100644 --- a/tests/petsc/copy_to_dealvec_block.cc +++ b/tests/petsc/copy_to_dealvec_block.cc @@ -17,7 +17,7 @@ // Test // LinearAlgebra::distributed::Vector::operator=(PETScWrappers::MPI::BlockVector&) - +// and PETScWrappers::MPI::BlockVector interaction with PETSc Vecs #include #include @@ -117,6 +117,17 @@ test() ExcInternalError()); } + // Create new block vector from a PETSc VECNEST + PETScWrappers::MPI::BlockVector vb2(v.petsc_vector()); + Assert(vb2.n_blocks() == v.n_blocks(), ExcInternalError()); + Assert(vb2.size() == v.size(), ExcInternalError()); + for (unsigned int bl = 0; bl < 2; ++bl) + { + Assert(vb2.block(bl).size() == v.block(bl).size(), ExcInternalError()); + Assert(vb2.block(bl).petsc_vector() == v.block(bl).petsc_vector(), + ExcInternalError()); + } + // done if (myid == 0) deallog << "OK" << std::endl; -- 2.39.5