const std::vector<IndexSet> &ghost_indices,
const MPI_Comm & communicator);
+ /**
+ * Create a BlockVector from an existing PETSc Vec. See
+ * assign_petsc_vector() for how @p v is interpreted.
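+ *
+ * For example (assuming @p other is an already initialized BlockVector),
+ * its nested PETSc vector can be wrapped without copying any data:
+ * @code
+ * PETScWrappers::MPI::BlockVector w(other.petsc_vector());
+ * @endcode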
+ */
+ explicit BlockVector(Vec v);
/**
* Destructor. Clears memory
*/
- ~BlockVector() override = default;
+ ~BlockVector() override;
/**
* Copy operator: fill all components of the vector that are locally
BlockVector &
operator=(const BlockVector &V);
+ /**
+ * This method assigns the given PETSc Vec @p v to this BlockVector. If
+ * @p v is of type VECNEST, each nested sub-vector becomes a block;
+ * otherwise @p v is wrapped as a single block. The data of @p v is
+ * referenced, not copied.
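+ *
+ * A minimal usage sketch (assuming @p v is an already initialized PETSc
+ * Vec, for instance of type VECNEST):
+ * @code
+ * PETScWrappers::MPI::BlockVector bv;
+ * bv.assign_petsc_vector(v);
+ * @endcode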
+ */
+ void
+ assign_petsc_vector(Vec v);
+
/**
* Reinitialize the BlockVector to contain @p n_blocks of size @p
* block_size, each of which stores @p locally_owned_size elements
const std::vector<IndexSet> &ghost_entries,
const MPI_Comm & communicator);
+ /**
+ * This function collects the sizes of the sub-objects and stores them
+ * in internal arrays, in order to be able to relay global indices into
+ * the vector to indices into the subobjects. You *must* call this
+ * function each time after you have changed the size of the sub-
+ * objects.
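+ *
+ * A typical pattern (the block sizes below are only illustrative, using
+ * a serial communicator):
+ * @code
+ * PETScWrappers::MPI::BlockVector bv;
+ * bv.reinit(2);
+ * bv.block(0).reinit(MPI_COMM_SELF, 10, 10);
+ * bv.block(1).reinit(MPI_COMM_SELF, 20, 20);
+ * bv.collect_sizes();
+ * @endcode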
+ */
+ void
+ collect_sizes();
+
/**
* Change the number of blocks to <tt>num_blocks</tt>. The individual
* blocks will get initialized with zero size, so it is assumed that the
const MPI_Comm &
get_mpi_communicator() const;
+ /**
+ * Return a reference to the underlying PETSc type. It can be used to
+ * modify the underlying data, so use it only when you know what you
+ * are doing.
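+ *
+ * For example, the returned Vec can be passed directly to native PETSc
+ * routines (a sketch, assuming @p bv is an initialized BlockVector):
+ * @code
+ * VecView(bv.petsc_vector(), PETSC_VIEWER_STDOUT_WORLD);
+ * @endcode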
+ */
+ Vec &
+ petsc_vector();
+
/**
* Swap the contents of this vector and the other vector <tt>v</tt>. One
* could do this operation with a temporary variable and copying over
* Exception
*/
DeclException0(ExcNonMatchingBlockVectors);
+
+ private:
+ void
+ setup_nest_vec();
+
+ Vec petsc_nest_vector = nullptr;
};
/** @} */
reinit(parallel_partitioning, ghost_indices, communicator);
}
+ inline BlockVector::BlockVector(Vec v)
+ : BlockVectorBase<Vector>()
+ {
+ this->assign_petsc_vector(v);
+ }
+
inline BlockVector &
BlockVector::operator=(const value_type s)
{
inline const MPI_Comm &
BlockVector::get_mpi_communicator() const
{
- return block(0).get_mpi_communicator();
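+ // If the PETSc object has not been created yet, PetscObjectComm()
+ // returns MPI_COMM_NULL; in that case keep returning the previously
+ // stored communicator (PETSC_COMM_SELF by default).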
+ static MPI_Comm comm = PETSC_COMM_SELF;
+ MPI_Comm pcomm =
+ PetscObjectComm(reinterpret_cast<PetscObject>(petsc_nest_vector));
+ if (pcomm != MPI_COMM_NULL)
+ comm = pcomm;
+ return comm;
}
inline bool
BlockVector::swap(BlockVector &v)
{
std::swap(this->components, v.components);
+ std::swap(this->petsc_nest_vector, v.petsc_nest_vector);
::dealii::swap(this->block_indices, v.block_indices);
}
{
u.swap(v);
}
-
} // namespace MPI
} // namespace PETScWrappers
reinit(const size_type m, const size_type n);
- /**
- * Return a reference to the MPI communicator object in use with this
- * matrix. Since this is a sequential matrix, it returns the MPI_COMM_SELF
- * communicator.
- */
- virtual const MPI_Comm &
- get_mpi_communicator() const override;
-
private:
/**
* Do the actual work for the respective reinit() function and the
/**
* Return a reference to the MPI communicator object in use with this
- * matrix. If not implemented, it returns the communicator used by the
- * PETSc Mat.
+ * matrix.
*/
- virtual const MPI_Comm &
+ const MPI_Comm &
get_mpi_communicator() const;
/**
inline const MPI_Comm &
MatrixBase::get_mpi_communicator() const
{
- static MPI_Comm comm;
- PetscObjectGetComm(reinterpret_cast<PetscObject>(matrix), &comm);
+ static MPI_Comm comm = PETSC_COMM_SELF;
+ MPI_Comm pcomm = PetscObjectComm(reinterpret_cast<PetscObject>(matrix));
+ if (pcomm != MPI_COMM_NULL)
+ comm = pcomm;
return comm;
}
void
clear();
- /**
- * Return a reference to the MPI communicator object in use with this
- * matrix.
- */
- const MPI_Comm &
- get_mpi_communicator() const override;
-
/**
* Matrix-vector multiplication: let <i>dst = M*src</i> with <i>M</i>
* being this matrix.
vmult(Vec &dst, const Vec &src) const;
private:
- /**
- * Copy of the communicator object to be used for this parallel matrix-
- * free object.
- */
- MPI_Comm communicator;
-
/**
* Callback-function registered as the matrix-vector multiplication of
* this matrix-free object called by PETSc routines. This function must be
* previous matrix is left to the caller.
*/
void
- do_reinit(const unsigned int m,
+ do_reinit(const MPI_Comm & comm,
+ const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
const unsigned int local_columns);
- // -------- template and inline functions ----------
-
- inline const MPI_Comm &
- MatrixFree::get_mpi_communicator() const
- {
- return communicator;
- }
} // namespace PETScWrappers
DEAL_II_NAMESPACE_CLOSE
reinit(const SparsityPatternType &sparsity_pattern,
const bool preset_nonzero_locations = true);
- /**
- * Return a reference to the MPI communicator object in use with this
- * matrix. Since this is a sequential matrix, it returns the MPI_COMM_SELF
- * communicator.
- */
- virtual const MPI_Comm &
- get_mpi_communicator() const override;
-
/**
* Return the number of rows of this matrix.
*/
private:
/**
* Do the actual work for the respective reinit() function and the
- * matching constructor, i.e. create a matrix. Getting rid of the previous
- * matrix is left to the caller.
+ * matching constructor, i.e. create a matrix.
*/
void
do_reinit(const size_type m,
const SparsityPatternType &sparsity_pattern,
const MPI_Comm & communicator);
- /**
- * Return a reference to the MPI communicator object in use with this
- * matrix.
- */
- virtual const MPI_Comm &
- get_mpi_communicator() const override;
-
/**
* @addtogroup Exceptions
* @{
const MPI::Vector & V = MPI::Vector()) const;
private:
- /**
- * Copy of the communicator object to be used for this parallel vector.
- */
- MPI_Comm communicator;
-
/**
* Same as previous functions.
*/
template <typename SparsityPatternType>
void
- do_reinit(const SparsityPatternType & sparsity_pattern,
+ do_reinit(const MPI_Comm & comm,
+ const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
const unsigned int this_process,
*/
template <typename SparsityPatternType>
void
- do_reinit(const IndexSet & local_rows,
+ do_reinit(const MPI_Comm & comm,
+ const IndexSet & local_rows,
const IndexSet & local_columns,
const SparsityPatternType &sparsity_pattern);
*/
template <typename SparsityPatternType>
void
- do_reinit(const IndexSet & local_rows,
+ do_reinit(const MPI_Comm & comm,
+ const IndexSet & local_rows,
const IndexSet & local_active_rows,
const IndexSet & local_columns,
const IndexSet & local_active_columns,
friend class BlockMatrixBase<SparseMatrix>;
};
-
-
- // -------- template and inline functions ----------
-
- inline const MPI_Comm &
- SparseMatrix::get_mpi_communicator() const
- {
- return communicator;
- }
} // namespace MPI
} // namespace PETScWrappers
reinit(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
- /**
- * Return a reference to the MPI communicator object in use with this
- * vector.
- */
- const MPI_Comm &
- get_mpi_communicator() const override;
-
/**
* Print to a stream. @p precision denotes the desired precision with
* which values shall be printed, @p scientific whether scientific
* locally.
*/
virtual void
- create_vector(const size_type n, const size_type locally_owned_size);
+ create_vector(const MPI_Comm &comm,
+ const size_type n,
+ const size_type locally_owned_size);
* you need to call update_ghost_values() before accessing those.
*/
virtual void
- create_vector(const size_type n,
+ create_vector(const MPI_Comm &comm,
+ const size_type n,
const size_type locally_owned_size,
const IndexSet &ghostnodes);
-
-
- private:
- /**
- * Copy of the communicator object to be used for this parallel vector.
- */
- MPI_Comm communicator;
};
Vector::Vector(const MPI_Comm & communicator,
const dealii::Vector<number> &v,
const size_type locally_owned_size)
- : communicator(communicator)
{
- Vector::create_vector(v.size(), locally_owned_size);
+ Vector::create_vector(communicator, v.size(), locally_owned_size);
*this = v;
}
- inline const MPI_Comm &
- Vector::get_mpi_communicator() const
- {
- return communicator;
- }
-
# endif // DOXYGEN
} // namespace MPI
} // namespace PETScWrappers
VectorBase &
operator=(const PetscScalar s);
+ /**
+ * This method assigns the given PETSc Vec to this vector. It is
+ * particularly useful when interfacing PETSc code with deal.II, since it
+ * allows one to reuse the deal.II VectorBase and the PETSc Vec without
+ * incurring memory copies.
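+ *
+ * A minimal sketch (assuming @p v is an already initialized PETSc Vec;
+ * the derived class used here is just one possibility):
+ * @code
+ * PETScWrappers::MPI::Vector w;
+ * w.assign_petsc_vector(v);
+ * @endcode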
+ */
+ void
+ assign_petsc_vector(Vec v);
+
/**
* Test for equality. This function assumes that the present vector and
* the one to compare with have the same size already, since comparing
* Return a reference to the MPI communicator object in use with this
* object.
*/
- virtual const MPI_Comm &
+ const MPI_Comm &
get_mpi_communicator() const;
protected:
inline const MPI_Comm &
VectorBase::get_mpi_communicator() const
{
- static MPI_Comm comm;
- PetscObjectGetComm(reinterpret_cast<PetscObject>(vector), &comm);
+ static MPI_Comm comm = PETSC_COMM_SELF;
+ MPI_Comm pcomm = PetscObjectComm(reinterpret_cast<PetscObject>(vector));
+ if (pcomm != MPI_COMM_NULL)
+ comm = pcomm;
return comm;
}
AssertThrow(ierr == 0, ExcPETScError(ierr));
}
-
- const MPI_Comm &
- FullMatrix::get_mpi_communicator() const
- {
- static const MPI_Comm communicator = MPI_COMM_SELF;
- return communicator;
- }
} // namespace PETScWrappers
namespace PETScWrappers
{
MatrixFree::MatrixFree()
- : communicator(PETSC_COMM_SELF)
{
const int m = 0;
- do_reinit(m, m, m, m);
+ do_reinit(MPI_COMM_SELF, m, m, m, m);
}
const unsigned int n,
const unsigned int local_rows,
const unsigned int local_columns)
- : communicator(communicator)
{
- do_reinit(m, n, local_rows, local_columns);
+ do_reinit(communicator, m, n, local_rows, local_columns);
}
const std::vector<unsigned int> &local_rows_per_process,
const std::vector<unsigned int> &local_columns_per_process,
const unsigned int this_process)
- : communicator(communicator)
{
Assert(local_rows_per_process.size() == local_columns_per_process.size(),
ExcDimensionMismatch(local_rows_per_process.size(),
local_columns_per_process.size()));
Assert(this_process < local_rows_per_process.size(), ExcInternalError());
- do_reinit(m,
+ do_reinit(communicator,
+ m,
n,
local_rows_per_process[this_process],
local_columns_per_process[this_process]);
const unsigned int n,
const unsigned int local_rows,
const unsigned int local_columns)
- : communicator(MPI_COMM_WORLD)
{
- do_reinit(m, n, local_rows, local_columns);
+ do_reinit(MPI_COMM_WORLD, m, n, local_rows, local_columns);
}
const std::vector<unsigned int> &local_rows_per_process,
const std::vector<unsigned int> &local_columns_per_process,
const unsigned int this_process)
- : communicator(MPI_COMM_WORLD)
{
Assert(local_rows_per_process.size() == local_columns_per_process.size(),
ExcDimensionMismatch(local_rows_per_process.size(),
local_columns_per_process.size()));
Assert(this_process < local_rows_per_process.size(), ExcInternalError());
- do_reinit(m,
+ do_reinit(MPI_COMM_WORLD,
+ m,
n,
local_rows_per_process[this_process],
local_columns_per_process[this_process]);
const unsigned int local_rows,
const unsigned int local_columns)
{
- this->communicator = communicator;
-
// destroy the matrix and generate a new one
const PetscErrorCode ierr = destroy_matrix(matrix);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- do_reinit(m, n, local_rows, local_columns);
+ do_reinit(communicator, m, n, local_rows, local_columns);
}
local_columns_per_process.size()));
Assert(this_process < local_rows_per_process.size(), ExcInternalError());
- this->communicator = communicator;
const PetscErrorCode ierr = destroy_matrix(matrix);
- AssertThrow(ierr != 0, ExcPETScError(ierr));
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
- do_reinit(m,
+ do_reinit(communicator,
+ m,
n,
local_rows_per_process[this_process],
local_columns_per_process[this_process]);
const unsigned int local_rows,
const unsigned int local_columns)
{
- reinit(this->communicator, m, n, local_rows, local_columns);
+ reinit(this->get_mpi_communicator(), m, n, local_rows, local_columns);
}
const std::vector<unsigned int> &local_columns_per_process,
const unsigned int this_process)
{
- reinit(this->communicator,
+ reinit(this->get_mpi_communicator(),
m,
n,
local_rows_per_process,
AssertThrow(ierr == 0, ExcPETScError(ierr));
const int m = 0;
- do_reinit(m, m, m, m);
+ do_reinit(MPI_COMM_SELF, m, m, m, m);
}
void
- MatrixFree::do_reinit(const unsigned int m,
+ MatrixFree::do_reinit(const MPI_Comm & communicator,
+ const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
const unsigned int local_columns)
const MPI_Comm &
BlockSparseMatrix::get_mpi_communicator() const
{
- return block(0, 0).get_mpi_communicator();
+ static MPI_Comm comm = PETSC_COMM_SELF;
+ MPI_Comm pcomm =
+ PetscObjectComm(reinterpret_cast<PetscObject>(petsc_nest_matrix));
+ if (pcomm != MPI_COMM_NULL)
+ comm = pcomm;
+ return comm;
}
BlockSparseMatrix::operator const Mat &() const
{
using size_type = types::global_dof_index;
+ BlockVector::~BlockVector()
+ {
+ PetscErrorCode ierr = VecDestroy(&petsc_nest_vector);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+ }
+
void
BlockVector::reinit(const unsigned int num_blocks)
{
collect_sizes();
}
+
+ void
+ BlockVector::assign_petsc_vector(Vec v)
+ {
+ PetscBool isnest;
+
+ PetscErrorCode ierr =
+ PetscObjectTypeCompare(reinterpret_cast<PetscObject>(v),
+ VECNEST,
+ &isnest);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
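+ // If v is a VECNEST vector, extract its sub-vectors and use each of
+ // them as a block; otherwise treat v itself as a single block.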
+ std::vector<Vec> sv;
+ if (isnest)
+ {
+ PetscInt nb;
+ ierr = VecNestGetSize(v, &nb);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+ for (PetscInt i = 0; i < nb; ++i)
+ {
+ Vec vv;
+ ierr = VecNestGetSubVec(v, i, &vv);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+ sv.push_back(vv);
+ }
+ }
+ else
+ {
+ sv.push_back(v);
+ }
+
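+ // Create one block per extracted sub-vector; the actual block sizes are
+ // recovered from the wrapped Vecs by collect_sizes() below.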
+ auto nb = sv.size();
+
+ std::vector<size_type> block_sizes(nb, 0);
+ this->block_indices.reinit(block_sizes);
+
+ this->components.resize(nb);
+ for (unsigned int i = 0; i < nb; ++i)
+ {
+ this->components[i].assign_petsc_vector(sv[i]);
+ }
+
+ this->collect_sizes();
+ }
+
+ Vec &
+ BlockVector::petsc_vector()
+ {
+ return petsc_nest_vector;
+ }
+
+ void
+ BlockVector::collect_sizes()
+ {
+ BlockVectorBase::collect_sizes();
+ setup_nest_vec();
+ }
+
+ void
+ BlockVector::setup_nest_vec()
+ {
+ PetscErrorCode ierr = VecDestroy(&petsc_nest_vector);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+
+ auto n = this->n_blocks();
+
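+ // Collect the raw PETSc Vec handles of all blocks.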
+ std::vector<Vec> pcomponents(n);
+ for (unsigned int i = 0; i < n; ++i)
+ pcomponents[i] = this->components[i].petsc_vector();
+
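+ // Use the communicator of the first block, if any; an empty block
+ // vector falls back to PETSC_COMM_SELF.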
+ MPI_Comm comm =
+ pcomponents.size() > 0 ?
+ PetscObjectComm(reinterpret_cast<PetscObject>(pcomponents[0])) :
+ PETSC_COMM_SELF;
+
+ ierr =
+ VecCreateNest(comm, n, nullptr, pcomponents.data(), &petsc_nest_vector);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+ }
} // namespace MPI
} // namespace PETScWrappers
namespace MPI
{
SparseMatrix::SparseMatrix()
- : communicator(MPI_COMM_SELF)
{
// just like for vectors: since we
// create an empty matrix, we can as
const std::vector<size_type> &local_columns_per_process,
const unsigned int this_process,
const bool preset_nonzero_locations)
- : communicator(communicator)
{
- do_reinit(sparsity_pattern,
+ do_reinit(communicator,
+ sparsity_pattern,
local_rows_per_process,
local_columns_per_process,
this_process,
if (&other == this)
return;
- this->communicator = other.communicator;
-
PetscErrorCode ierr = destroy_matrix(matrix);
AssertThrow(ierr == 0, ExcPETScError(ierr));
const SparsityPatternType &sparsity_pattern,
const MPI_Comm & communicator)
{
- this->communicator = communicator;
-
// get rid of old matrix and generate a new one
const PetscErrorCode ierr = destroy_matrix(matrix);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- do_reinit(local_rows,
+ do_reinit(communicator,
+ local_rows,
local_active_rows,
local_columns,
local_active_columns,
if (&other == this)
return;
- this->communicator = other.communicator;
-
const PetscErrorCode ierr =
MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN);
AssertThrow(ierr == 0, ExcPETScError(ierr));
const unsigned int this_process,
const bool preset_nonzero_locations)
{
- this->communicator = communicator;
-
// get rid of old matrix and generate a new one
const PetscErrorCode ierr = destroy_matrix(matrix);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- do_reinit(sparsity_pattern,
+ do_reinit(communicator,
+ sparsity_pattern,
local_rows_per_process,
local_columns_per_process,
this_process,
const SparsityPatternType &sparsity_pattern,
const MPI_Comm & communicator)
{
- this->communicator = communicator;
-
// get rid of old matrix and generate a new one
const PetscErrorCode ierr = destroy_matrix(matrix);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- do_reinit(local_rows, local_columns, sparsity_pattern);
+ do_reinit(communicator, local_rows, local_columns, sparsity_pattern);
}
template <typename SparsityPatternType>
void
- SparseMatrix::do_reinit(const IndexSet & local_rows,
+ SparseMatrix::do_reinit(const MPI_Comm & communicator,
+ const IndexSet & local_rows,
const IndexSet & local_columns,
const SparsityPatternType &sparsity_pattern)
{
template <typename SparsityPatternType>
void
SparseMatrix::do_reinit(
+ const MPI_Comm & communicator,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
// BDDC
template <typename SparsityPatternType>
void
- SparseMatrix::do_reinit(const IndexSet & local_rows,
+ SparseMatrix::do_reinit(const MPI_Comm & communicator,
+ const IndexSet & local_rows,
const IndexSet & local_active_rows,
const IndexSet & local_columns,
const IndexSet & local_active_columns,
const MPI_Comm &);
template void
- SparseMatrix::do_reinit(const SparsityPattern &,
+ SparseMatrix::do_reinit(const MPI_Comm &,
+ const SparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const unsigned int,
const bool);
template void
- SparseMatrix::do_reinit(const DynamicSparsityPattern &,
+ SparseMatrix::do_reinit(const MPI_Comm &,
+ const DynamicSparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const unsigned int,
const bool);
template void
- SparseMatrix::do_reinit(const IndexSet &,
+ SparseMatrix::do_reinit(const MPI_Comm &,
+ const IndexSet &,
const IndexSet &,
const SparsityPattern &);
template void
- SparseMatrix::do_reinit(const IndexSet &,
+ SparseMatrix::do_reinit(const MPI_Comm &,
+ const IndexSet &,
const IndexSet &,
const DynamicSparsityPattern &);
const MPI_Comm &);
template void
- SparseMatrix::do_reinit(const IndexSet &,
+ SparseMatrix::do_reinit(const MPI_Comm &,
+ const IndexSet &,
const IndexSet &,
const IndexSet &,
const IndexSet &,
const SparsityPattern &);
template void
- SparseMatrix::do_reinit(const IndexSet &,
+ SparseMatrix::do_reinit(const MPI_Comm &,
+ const IndexSet &,
const IndexSet &,
const IndexSet &,
const IndexSet &,
namespace MPI
{
Vector::Vector()
- : communicator(MPI_COMM_SELF)
{
// virtual functions called in constructors and destructors never use the
// override in a derived class
// for clarity be explicit on which function is called
- Vector::create_vector(0, 0);
+ Vector::create_vector(MPI_COMM_SELF, 0, 0);
}
Vector::Vector(const MPI_Comm &communicator,
const size_type n,
const size_type locally_owned_size)
- : communicator(communicator)
{
- Vector::create_vector(n, locally_owned_size);
+ Vector::create_vector(communicator, n, locally_owned_size);
}
Vector::Vector(const IndexSet &local,
const IndexSet &ghost,
const MPI_Comm &communicator)
- : communicator(communicator)
{
Assert(local.is_ascending_and_one_to_one(communicator),
ExcNotImplemented());
IndexSet ghost_set = ghost;
ghost_set.subtract_set(local);
- Vector::create_vector(local.size(), local.n_elements(), ghost_set);
+ Vector::create_vector(communicator,
+ local.size(),
+ local.n_elements(),
+ ghost_set);
}
Vector::Vector(const Vector &v)
: VectorBase()
- , communicator(v.communicator)
{
if (v.has_ghost_elements())
- Vector::create_vector(v.size(),
+ Vector::create_vector(v.get_mpi_communicator(),
+ v.size(),
v.locally_owned_size(),
v.ghost_indices);
else
- Vector::create_vector(v.size(), v.locally_owned_size());
+ Vector::create_vector(v.get_mpi_communicator(),
+ v.size(),
+ v.locally_owned_size());
this->operator=(v);
}
Vector::Vector(const IndexSet &local, const MPI_Comm &communicator)
- : communicator(communicator)
{
Assert(local.is_ascending_and_one_to_one(communicator),
ExcNotImplemented());
- Vector::create_vector(local.size(), local.n_elements());
+ Vector::create_vector(communicator, local.size(), local.n_elements());
}
if (size() != v.size())
{
if (v.has_ghost_elements())
- reinit(v.locally_owned_elements(), v.ghost_indices, v.communicator);
+ reinit(v.locally_owned_elements(),
+ v.ghost_indices,
+ v.get_mpi_communicator());
else
- reinit(v.communicator, v.size(), v.locally_owned_size(), true);
+ reinit(v.get_mpi_communicator(),
+ v.size(),
+ v.locally_owned_size(),
+ true);
}
PetscErrorCode ierr = VecCopy(v.vector, vector);
{
VectorBase::clear();
- create_vector(0, 0);
+ create_vector(MPI_COMM_SELF, 0, 0);
}
void
- Vector::reinit(const MPI_Comm &comm,
+ Vector::reinit(const MPI_Comm &communicator,
const size_type n,
const size_type local_sz,
const bool omit_zeroing_entries)
{
- communicator = comm;
-
// only do something if the sizes
// mismatch (may not be true for every proc)
const PetscErrorCode ierr = VecDestroy(&vector);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- create_vector(n, local_sz);
+ create_vector(communicator, n, local_sz);
}
// finally clear the new vector if so
{
if (v.has_ghost_elements())
{
- reinit(v.locally_owned_elements(), v.ghost_indices, v.communicator);
+ reinit(v.locally_owned_elements(),
+ v.ghost_indices,
+ v.get_mpi_communicator());
if (!omit_zeroing_entries)
{
const PetscErrorCode ierr = VecSet(vector, 0.0);
}
}
else
- reinit(v.communicator,
+ reinit(v.get_mpi_communicator(),
v.size(),
v.locally_owned_size(),
omit_zeroing_entries);
const PetscErrorCode ierr = VecDestroy(&vector);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- communicator = comm;
-
Assert(local.is_ascending_and_one_to_one(comm), ExcNotImplemented());
IndexSet ghost_set = ghost;
ghost_set.subtract_set(local);
- create_vector(local.size(), local.n_elements(), ghost_set);
+ create_vector(comm, local.size(), local.n_elements(), ghost_set);
}
void
const PetscErrorCode ierr = VecDestroy(&vector);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- communicator = comm;
-
Assert(local.is_ascending_and_one_to_one(comm), ExcNotImplemented());
Assert(local.size() > 0, ExcMessage("can not create vector of size 0."));
- create_vector(local.size(), local.n_elements());
+ create_vector(comm, local.size(), local.n_elements());
}
void
void
- Vector::create_vector(const size_type n, const size_type locally_owned_size)
+ Vector::create_vector(const MPI_Comm &communicator,
+ const size_type n,
+ const size_type locally_owned_size)
{
(void)n;
AssertIndexRange(locally_owned_size, n + 1);
void
- Vector::create_vector(const size_type n,
+ Vector::create_vector(const MPI_Comm &communicator,
+ const size_type n,
const size_type locally_owned_size,
const IndexSet &ghostnodes)
{
# ifdef DEAL_II_WITH_MPI
// in parallel, check that the vector
// is zero on _all_ processors.
- unsigned int num_nonzero = Utilities::MPI::sum(has_nonzero, communicator);
+ unsigned int num_nonzero =
+ Utilities::MPI::sum(has_nonzero, this->get_mpi_communicator());
return num_nonzero == 0;
# else
return has_nonzero == 0;
// which is clearly slow, but nobody is going to print a whole
// matrix this way on a regular basis for production runs, so
// the slowness of the barrier doesn't matter
+ MPI_Comm communicator = this->get_mpi_communicator();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(communicator);
i++)
- const MPI_Comm &
- SparseMatrix::get_mpi_communicator() const
- {
- static MPI_Comm comm;
- const PetscErrorCode ierr =
- PetscObjectGetComm(reinterpret_cast<PetscObject>(matrix), &comm);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
- return comm;
- }
-
-
-
void
SparseMatrix::do_reinit(const size_type m,
const size_type n,
, ghosted(false)
, last_action(::dealii::VectorOperation::unknown)
{
+ /* TODO GHOSTED */
Assert(MultithreadInfo::is_running_single_threaded(),
ExcMessage("PETSc does not support multi-threaded access, set "
"the thread limit to 1 in MPI_InitFinalize()."));
+ void
+ VectorBase::assign_petsc_vector(Vec v)
+ {
+ /* TODO GHOSTED */
+ AssertThrow(last_action == ::dealii::VectorOperation::unknown,
+ ExcMessage("Cannot assign a new Vec"));
+ PetscErrorCode ierr =
+ PetscObjectReference(reinterpret_cast<PetscObject>(v));
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+ ierr = VecDestroy(&vector);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+ vector = v;
+ }
+
void
VectorBase::clear()
{
// check that the two vectors are equal
deallog << "Check 1: " << (v1 == v2 ? "true" : "false") << std::endl;
+
+ // print vectors
+ v1.print(deallog.get_file_stream(), 10, true, false);
+
+ // Extract the PETSc VECNEST and use print from PETScWrappers::VectorBase
+ PETScWrappers::VectorBase v1b(v1.petsc_vector());
+ v1b.print(deallog.get_file_stream(), 10, true, false);
};
// Check 2: loop forward and back
DEAL::Check 1: true
+Component 0
+[Proc 0 0-1]
+0.0000000000e+00
+1.0000000000e+00
+
+Component 1
+[Proc 0 0-3]
+2.0000000000e+00
+3.0000000000e+00
+4.0000000000e+00
+5.0000000000e+00
+
+Component 2
+[Proc 0 0-2]
+6.0000000000e+00
+7.0000000000e+00
+8.0000000000e+00
+
+Component 3
+[Proc 0 0-4]
+9.0000000000e+00
+1.0000000000e+01
+1.1000000000e+01
+1.2000000000e+01
+1.3000000000e+01
+
+0.0000000000e+00
+1.0000000000e+00
+2.0000000000e+00
+3.0000000000e+00
+4.0000000000e+00
+5.0000000000e+00
+6.0000000000e+00
+7.0000000000e+00
+8.0000000000e+00
+9.0000000000e+00
+1.0000000000e+01
+1.1000000000e+01
+1.2000000000e+01
+1.3000000000e+01
+
DEAL::Check 2: true
DEAL::Check 3: true
DEAL::Check 4: true
// Test
// LinearAlgebra::distributed::Vector::operator=(PETScWrappers::MPI::BlockVector&)
-
+// and PETScWrappers::MPI::BlockVector interaction with PETSc Vecs
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_block_vector.h>
ExcInternalError());
}
+ // Create a new block vector from a PETSc VECNEST vector
+ PETScWrappers::MPI::BlockVector vb2(v.petsc_vector());
+ Assert(vb2.n_blocks() == v.n_blocks(), ExcInternalError());
+ Assert(vb2.size() == v.size(), ExcInternalError());
+ for (unsigned int bl = 0; bl < 2; ++bl)
+ {
+ Assert(vb2.block(bl).size() == v.block(bl).size(), ExcInternalError());
+ Assert(vb2.block(bl).petsc_vector() == v.block(bl).petsc_vector(),
+ ExcInternalError());
+ }
+
// done
if (myid == 0)
deallog << "OK" << std::endl;