add(const PetscScalar factor, const MatrixBase &other);
- /**
- * Add the matrix @p other scaled by the factor @p factor to the current
- * matrix.
- * @deprecated Use the function with order of arguments reversed instead.
- */
- DEAL_II_DEPRECATED
- MatrixBase &
- add(const MatrixBase &other, const PetscScalar factor);
-
/**
* Matrix-vector multiplication: let <i>dst = M*src</i> with <i>M</i>
* being this matrix.
*/
~SparseMatrix() override;
- /**
- * Create a sparse matrix of dimensions @p m times @p n, with an initial
- * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
- * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
- * function for more information about these parameters). PETSc is able
- * to cope with the situation that more than this number of elements are
- * later allocated for a row, but this involves copying data, and is
- * thus expensive.
- *
- * For the meaning of the @p local_row and @p local_columns parameters,
- * see the class documentation.
- *
- * The @p is_symmetric flag determines whether we should tell PETSc that
- * the matrix is going to be symmetric (as indicated by the call
- * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>. Note that the PETSc
- * documentation states that one cannot form an ILU decomposition of a
- * matrix for which this flag has been set to @p true, only an ICC. The
- * default value of this flag is @p false.
- *
- * @deprecated This constructor is deprecated: please use the
- * constructor with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- SparseMatrix(const MPI_Comm &communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric = false,
- const size_type n_offdiag_nonzero_per_row = 0);
-
- /**
- * Initialize a rectangular matrix with @p m rows and @p n columns. The
- * maximal number of nonzero entries for diagonal and off- diagonal
- * blocks of each row is given by the @p row_lengths and @p
- * offdiag_row_lengths arrays.
- *
- * For the meaning of the @p local_row and @p local_columns parameters,
- * see the class documentation.
- *
- * Just as for the other constructors: PETSc is able to cope with the
- * situation that more than this number of elements are later allocated
- * for a row, but this involves copying data, and is thus expensive.
- *
- * The @p is_symmetric flag determines whether we should tell PETSc that
- * the matrix is going to be symmetric (as indicated by the call
- * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>. Note that the PETSc
- * documentation states that one cannot form an ILU decomposition of a
- * matrix for which this flag has been set to @p true, only an ICC. The
- * default value of this flag is @p false.
- *
- * @deprecated This constructor is deprecated: please use the
- * constructor with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- SparseMatrix(const MPI_Comm & communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric = false,
- const std::vector<size_type> &offdiag_row_lengths =
- std::vector<size_type>());
-
/**
* Initialize using the given sparsity pattern with communication
* happening over the provided @p communicator.
void
copy_from(const SparseMatrix &other);
- /**
- * Throw away the present matrix and generate one that has the same
- * properties as if it were created by the constructor of this class
- * with the same argument list as the present function.
- *
- * @deprecated This overload of <code>reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- reinit(const MPI_Comm &communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric = false,
- const size_type n_offdiag_nonzero_per_row = 0);
-
- /**
- * Throw away the present matrix and generate one that has the same
- * properties as if it were created by the constructor of this class
- * with the same argument list as the present function.
- *
- * @deprecated This overload of <code>reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- reinit(const MPI_Comm & communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric = false,
- const std::vector<size_type> &offdiag_row_lengths =
- std::vector<size_type>());
-
/**
* Initialize using the given sparsity pattern with communication
* happening over the provided @p communicator.
*/
MPI_Comm communicator;
- /**
- * Do the actual work for the respective reinit() function and the
- * matching constructor, i.e. create a matrix. Getting rid of the
- * previous matrix is left to the caller.
- *
- * @deprecated This overload of <code>do_reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- do_reinit(const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric = false,
- const size_type n_offdiag_nonzero_per_row = 0);
-
- /**
- * Same as previous function.
- *
- * @deprecated This overload of <code>do_reinit</code> is deprecated:
- * please use the overload with a sparsity pattern argument instead.
- */
- DEAL_II_DEPRECATED
- void
- do_reinit(const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric = false,
- const std::vector<size_type> &offdiag_row_lengths =
- std::vector<size_type>());
-
/**
* Same as previous functions.
*/
}
+
MatrixBase &
MatrixBase::add(const PetscScalar factor, const MatrixBase &other)
{
- MatrixBase &
- MatrixBase::add(const MatrixBase &other, const PetscScalar factor)
- {
- return add(factor, other);
- }
-
-
void
MatrixBase::vmult(VectorBase &dst, const VectorBase &src) const
{
destroy_matrix(matrix);
}
- SparseMatrix::SparseMatrix(const MPI_Comm &communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric,
- const size_type n_offdiag_nonzero_per_row)
- : communicator(communicator)
- {
- do_reinit(m,
- n,
- local_rows,
- local_columns,
- n_nonzero_per_row,
- is_symmetric,
- n_offdiag_nonzero_per_row);
- }
-
-
-
- SparseMatrix::SparseMatrix(
- const MPI_Comm & communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric,
- const std::vector<size_type> &offdiag_row_lengths)
- : communicator(communicator)
- {
- do_reinit(m,
- n,
- local_rows,
- local_columns,
- row_lengths,
- is_symmetric,
- offdiag_row_lengths);
- }
-
template <typename SparsityPatternType>
AssertThrow(ierr == 0, ExcPETScError(ierr));
}
- void
- SparseMatrix::reinit(const MPI_Comm &communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric,
- const size_type n_offdiag_nonzero_per_row)
- {
- this->communicator = communicator;
-
- // get rid of old matrix and generate a new one
- const PetscErrorCode ierr = destroy_matrix(matrix);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
-
- do_reinit(m,
- n,
- local_rows,
- local_columns,
- n_nonzero_per_row,
- is_symmetric,
- n_offdiag_nonzero_per_row);
- }
-
-
-
- void
- SparseMatrix::reinit(const MPI_Comm & communicator,
- const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric,
- const std::vector<size_type> &offdiag_row_lengths)
- {
- this->communicator = communicator;
-
- // get rid of old matrix and generate a
- // new one
- const PetscErrorCode ierr = destroy_matrix(matrix);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
-
- do_reinit(m,
- n,
- local_rows,
- local_columns,
- row_lengths,
- is_symmetric,
- offdiag_row_lengths);
- }
-
template <typename SparsityPatternType>
preset_nonzero_locations);
}
+
+
template <typename SparsityPatternType>
void
SparseMatrix::reinit(const IndexSet & local_rows,
do_reinit(local_rows, local_columns, sparsity_pattern);
}
- void
- SparseMatrix::do_reinit(const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const size_type n_nonzero_per_row,
- const bool is_symmetric,
- const size_type n_offdiag_nonzero_per_row)
- {
- Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));
-
- // use the call sequence indicating only
- // a maximal number of elements per row
- // for all rows globally
- const PetscErrorCode ierr = MatCreateAIJ(communicator,
- local_rows,
- local_columns,
- m,
- n,
- n_nonzero_per_row,
- nullptr,
- n_offdiag_nonzero_per_row,
- nullptr,
- &matrix);
- set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
-
- // set symmetric flag, if so requested
- if (is_symmetric == true)
- {
- set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
- }
- }
-
-
-
- void
- SparseMatrix::do_reinit(const size_type m,
- const size_type n,
- const size_type local_rows,
- const size_type local_columns,
- const std::vector<size_type> &row_lengths,
- const bool is_symmetric,
- const std::vector<size_type> &offdiag_row_lengths)
- {
- Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));
-
- Assert(row_lengths.size() == m,
- ExcDimensionMismatch(row_lengths.size(), m));
-
- // For the case that local_columns is smaller than one of the row lengths
- // MatCreateMPIAIJ throws an error. In this case use a
- // PETScWrappers::SparseMatrix
- for (const size_type row_length : row_lengths)
- {
- (void)row_length;
- Assert(row_length <= local_columns,
- ExcIndexRange(row_length, 1, local_columns + 1));
- }
-
- // use the call sequence indicating a
- // maximal number of elements for each
- // row individually. annoyingly, we
- // always use unsigned ints for cases
- // like this, while PETSc wants to see
- // signed integers. so we have to
- // convert, unless we want to play dirty
- // tricks with conversions of pointers
- const std::vector<PetscInt> int_row_lengths(row_lengths.begin(),
- row_lengths.end());
- const std::vector<PetscInt> int_offdiag_row_lengths(
- offdiag_row_lengths.begin(), offdiag_row_lengths.end());
-
- // TODO: There must be a significantly better way to provide information
- // about the off-diagonal blocks of the matrix. this way, petsc keeps
- // allocating tiny chunks of memory, and gets completely hung up over this
- const PetscErrorCode ierr =
- MatCreateAIJ(communicator,
- local_rows,
- local_columns,
- m,
- n,
- 0,
- int_row_lengths.data(),
- 0,
- offdiag_row_lengths.size() ?
- int_offdiag_row_lengths.data() :
- nullptr,
- &matrix);
-
- // TODO: Sometimes the actual number of nonzero entries allocated is
- // greater than the number of nonzero entries, which petsc will complain
- // about unless explicitly disabled with MatSetOption. There is probably a
- // way to prevent a different number nonzero elements being allocated in
- // the first place. (See also previous TODO).
- set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
-
- // set symmetric flag, if so requested
- if (is_symmetric == true)
- {
- set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
- }
- }
template <typename SparsityPatternType>
// PETScWrappers::MatrixBase::operator=
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/vector.h>
// check
// PETScWrappers::MPI::SparseMatrix
- MPI_Comm mpi_communicator(MPI_COMM_WORLD);
- int n_jobs = 1;
- MPI_Comm_size(mpi_communicator, &n_jobs);
- const unsigned int n_mpi_processes = static_cast<unsigned int>(n_jobs);
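+  // query the parallel layout through the deal.II MPI utilities
+  // rather than calling MPI_Comm_size() by hand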
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes =
+ Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int my_id =
+ Utilities::MPI::this_mpi_process(mpi_communicator);
Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
- PETScWrappers::MPI::SparseMatrix v2(
- mpi_communicator, n_dofs, n_dofs, n_local_dofs, n_local_dofs, 5);
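+  // an explicit IndexSet/sparsity-pattern description of the matrix
+  // replaces the per-row nonzero-count guess of the removed constructor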
+ IndexSet locally_owned_dofs(n_dofs);
+  locally_owned_dofs.add_range(my_id * n_local_dofs,
+                               (my_id + 1) * n_local_dofs);
+  IndexSet locally_relevant_dofs = locally_owned_dofs;
+  locally_relevant_dofs.add_index(0);
+ DynamicSparsityPattern dsp(n_dofs);
+ dsp.add(0, 0);
+ PETScWrappers::MPI::SparseMatrix v2;
+ v2.reinit(locally_owned_dofs,
+ locally_owned_dofs,
+ dsp,
+ mpi_communicator);
test(v2);
}
}
//
// the tests build the 5-point stencil matrix for a uniform grid of size N*N
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
void
test()
{
- const unsigned int N = 200;
+ const unsigned int N = 200;
+ const unsigned int n_dofs = N * N;
// build the sparse matrix
- PETScWrappers::MPI::SparseMatrix matrix(
- PETSC_COMM_WORLD, N * N, N * N, N * N, N * N, 5);
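+  // replace the deprecated size-based constructor with an IndexSet
+  // partition plus an explicit sparsity pattern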
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes =
+ Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int my_id = Utilities::MPI::this_mpi_process(mpi_communicator);
+ Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
+ const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
+ IndexSet locally_owned_dofs(n_dofs);
+  locally_owned_dofs.add_range(my_id * n_local_dofs,
+                               (my_id + 1) * n_local_dofs);
+ IndexSet locally_relevant_dofs = locally_owned_dofs;
+ DynamicSparsityPattern dsp(n_dofs);
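+  // couple every grid node with itself and its four neighbors on the
+  // N x N grid; entries are added in both (row, column) orders so the
+  // pattern stays symmetric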
+ for (unsigned int i = 0; i < N; i++)
+ for (unsigned int j = 0; j < N; j++)
+ {
+ const unsigned int global = i * N + j;
+ dsp.add(global, global);
+ if (j > 0)
+ {
+ dsp.add(global - 1, global);
+ dsp.add(global, global - 1);
+ }
+ if (j < N - 1)
+ {
+ dsp.add(global + 1, global);
+ dsp.add(global, global + 1);
+ }
+ if (i > 0)
+ {
+ dsp.add(global - N, global);
+ dsp.add(global, global - N);
+ }
+ if (i < N - 1)
+ {
+ dsp.add(global + N, global);
+ dsp.add(global, global + N);
+ }
+ }
+
+ PETScWrappers::MPI::SparseMatrix matrix;
+ matrix.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
for (unsigned int i = 0; i < N; i++)
for (unsigned int j = 0; j < N; j++)
{
// then do a single matrix-vector
// multiplication with subsequent formation
// of the matrix norm
- PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, N * N, N * N);
- PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, N * N, N * N);
- for (unsigned int i = 0; i < N * N; ++i)
+ PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, n_dofs, n_dofs);
+ PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, n_dofs, n_dofs);
+ for (unsigned int i = 0; i < n_dofs; ++i)
v1(i) = i;
matrix.vmult(v2, v1);
// matrix in a consecutive fashion, but rather according to the order of
// degrees of freedom in the sequence of cells that we traverse
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
void
test()
{
- const unsigned int N = 200;
+ const unsigned int N = 200;
+ const unsigned int n_dofs = N * N;
// first find a random permutation of the
// indices
}
// build the sparse matrix
- PETScWrappers::MPI::SparseMatrix matrix(
- PETSC_COMM_WORLD, N * N, N * N, N * N, N * N, 5);
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes =
+ Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int my_id = Utilities::MPI::this_mpi_process(mpi_communicator);
+ Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
+ const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
+ IndexSet locally_owned_dofs(n_dofs);
+  locally_owned_dofs.add_range(my_id * n_local_dofs,
+                               (my_id + 1) * n_local_dofs);
+ IndexSet locally_relevant_dofs = locally_owned_dofs;
+ DynamicSparsityPattern dsp(n_dofs);
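+  // same 5-point couplings as in the unpermuted test, but run through a
+  // random permutation of the grid coordinates, scattering the entries
+  // over the matrix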
+ for (unsigned int i_ = 0; i_ < N; i_++)
+ for (unsigned int j_ = 0; j_ < N; j_++)
+ {
+ const unsigned int i = permutation[i_];
+ const unsigned int j = permutation[j_];
+
+ const unsigned int global = i * N + j;
+ dsp.add(global, global);
+ if (j > 0)
+ {
+ dsp.add(global - 1, global);
+ dsp.add(global, global - 1);
+ }
+ if (j < N - 1)
+ {
+ dsp.add(global + 1, global);
+ dsp.add(global, global + 1);
+ }
+ if (i > 0)
+ {
+ dsp.add(global - N, global);
+ dsp.add(global, global - N);
+ }
+ if (i < N - 1)
+ {
+ dsp.add(global + N, global);
+ dsp.add(global, global + N);
+ }
+ }
+
+ PETScWrappers::MPI::SparseMatrix matrix;
+ matrix.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
for (unsigned int i_ = 0; i_ < N; i_++)
for (unsigned int j_ = 0; j_ < N; j_++)
{
// then do a single matrix-vector
// multiplication with subsequent formation
// of the matrix norm
- PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, N * N, N * N);
- PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, N * N, N * N);
- for (unsigned int i = 0; i < N * N; ++i)
+ PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, n_dofs, n_dofs);
+ PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, n_dofs, n_dofs);
+ for (unsigned int i = 0; i < n_dofs; ++i)
v1(i) = i;
matrix.vmult(v2, v1);
deallog << m(i, i) << " ";
deallog << std::endl;
- m.add(m2, 1.0);
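+  // the deprecated add(matrix, factor) overload is gone: the scaling
+  // factor now comes first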
+ m.add(1.0, m2);
deallog << "after: " << m(0, 1) << std::endl;
for (unsigned int i = 0; i < s; ++i)
deallog << m(i, i) << " ";
deallog << std::endl;
- m.add(m2, -1.0);
+ m.add(-1.0, m2);
deallog << "back to original: " << m(0, 1) << std::endl;
for (unsigned int i = 0; i < s; ++i)