// different kinds of PETSc matrices and vectors
// TODO: add a few more tests...
+#include <deal.II/base/index_set.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/linear_operator.h>
#include "../tests.h"
using size_type = PETScWrappers::MPI::SparseMatrix::size_type;
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
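+ // The rank determines which contiguous block of rows this process owns.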
+ const auto rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
initlog();
deallog << std::setprecision(10);
{
- unsigned int np = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const unsigned int np = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
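+ // Global problem size; the test body runs only if it divides evenly among the processes.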
+ const unsigned int n_dofs = 4;
- if (4 % np == 0 && np <= 4)
+ if (n_dofs % np == 0 && np <= n_dofs)
{
- PETScWrappers::MPI::SparseMatrix a(
- MPI_COMM_WORLD, 4, 4, 4 / np, 4 / np, 1);
- for (unsigned int i = 0; i < 4; ++i)
- for (unsigned int j = 0; j < 4; ++j)
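+ // Each process owns an equally sized, contiguous range of rows.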
+ const auto dofs_per_processor = n_dofs / np;
+ IndexSet locally_owned_dofs(n_dofs);
+ locally_owned_dofs.add_range(rank * dofs_per_processor,
+ (rank + 1) * dofs_per_processor);
+ locally_owned_dofs.compress();
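+ // Only the diagonal is written below, so one entry per owned row suffices.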
+ DynamicSparsityPattern dsp(n_dofs, n_dofs);
+ for (const auto &index : locally_owned_dofs)
+ dsp.add(index, index);
+
+ PETScWrappers::MPI::SparseMatrix a;
+ a.reinit(locally_owned_dofs, locally_owned_dofs, dsp, MPI_COMM_WORLD);
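+ // Add 1 to each owned diagonal entry once per owned index j, so after
+ // compress() every diagonal entry equals dofs_per_processor.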
+ for (const auto &i : locally_owned_dofs)
+ for (const auto &j : locally_owned_dofs)
a.add(i, i, 1);
a.compress(VectorOperation::add);
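+ // Wrap the assembled PETSc matrix in a LinearOperator; the template
+ // argument selects the vector type the operator acts on.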
auto op_a = linear_operator<PETScWrappers::MPI::Vector>(a);
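+ // A minimal usage sketch (an assumption, not part of this test): op_a can
+ // be applied to compatible PETSc vectors, e.g.
+ //   PETScWrappers::MPI::Vector u(locally_owned_dofs, MPI_COMM_WORLD);
+ //   PETScWrappers::MPI::Vector v(locally_owned_dofs, MPI_COMM_WORLD);
+ //   u = 1.;
+ //   op_a.vmult(v, u); // each entry of v ends up equal to dofs_per_processor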