#include "../tests.h"
+#include "../testmatrix.h"
+
template <typename MatrixType>
void
test(MatrixType &m);

int
main(int argc, char **argv)
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
{
- const unsigned int n_dofs = 420;
+ const unsigned int N      = 20;
+ const unsigned int n_dofs = N * N;
// check
// PETScWrappers::SparseMatrix
PETScWrappers::SparseMatrix v1(n_dofs, n_dofs, 5);
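// (the last constructor argument is the maximum number of entries per row;
// 5 matches the five-point stencil used throughout these tests)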
MPI_Comm mpi_communicator(MPI_COMM_WORLD);
int n_jobs = 1;
MPI_Comm_size(mpi_communicator, &n_jobs);
const unsigned int n_mpi_processes = static_cast<unsigned int>(n_jobs);
Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
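// purely illustrative: with n_dofs = 400 and 4 MPI processes, each process
// owns a contiguous block of 100 rows of the matrix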
- PETScWrappers::MPI::SparseMatrix v2(
- mpi_communicator, n_dofs, n_dofs, n_local_dofs, n_local_dofs, 5);
+ PETScWrappers::MPI::SparseMatrix v2;
+ {
+ FDMatrix fd_matrix(N, N);
+ DynamicSparsityPattern dsp(n_dofs, n_dofs);
+ fd_matrix.five_point_structure(dsp);
+ dsp.add(0, 0); // be sure that we have this one
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ IndexSet all_dofs(n_dofs);
+ all_dofs.add_range(0, n_dofs);
+ v2.reinit(all_dofs, all_dofs, sparsity_pattern, PETSC_COMM_WORLD);
+ }
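+ // FDMatrix from ../testmatrix.h generates the same five-point couplings
+ // that the later tests in this series build by hand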
test(v2);
}
}
//
// the tests build the 5-point stencil matrix for a uniform grid of size N*N
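// (for an interior grid point (i,j), row g = i*N+j of the matrix couples to
// itself and to its four neighbors g-1, g+1, g-N and g+N, so each row holds
// at most five entries)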
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/sparsity_pattern.h>
#include <iostream>
void
test()
{
- const unsigned int N = 200;
+ const unsigned int N      = 200;
+ const unsigned int n_dofs = N * N;
- // build the sparse matrix
- PETScWrappers::MPI::SparseMatrix matrix(
- PETSC_COMM_WORLD, N * N, N * N, N * N, N * N, 5);
+ DynamicSparsityPattern dsp(n_dofs, n_dofs);
+ // An older version of this test relied on PETSc allocating entries
+ // dynamically, but matrices now need their sparsity pattern up front, so
+ // the stencil is set up twice: once here for the pattern and once below
+ // when the entries are written
+ for (unsigned int i = 0; i < N; i++)
+ for (unsigned int j = 0; j < N; j++)
+ {
+ const unsigned int global = i * N + j;
+ dsp.add(global, global);
+ if (j > 0)
+ {
+ dsp.add(global - 1, global);
+ dsp.add(global, global - 1);
+ }
+ if (j < N - 1)
+ {
+ dsp.add(global + 1, global);
+ dsp.add(global, global + 1);
+ }
+ if (i > 0)
+ {
+ dsp.add(global - N, global);
+ dsp.add(global, global - N);
+ }
+ if (i < N - 1)
+ {
+ dsp.add(global + N, global);
+ dsp.add(global, global + N);
+ }
+ }
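+ // note: DynamicSparsityPattern::add() ignores entries that are already
+ // present, so adding each coupling from both neighboring grid points is
+ // harmless and the resulting pattern is symmetric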
+
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ IndexSet all_dofs(n_dofs);
+ all_dofs.add_range(0, n_dofs);
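+ // (equivalently, complete_index_set(n_dofs) from <deal.II/base/index_set.h>
+ // would build this "own everything" index set in one step)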
+
+ PETScWrappers::MPI::SparseMatrix matrix;
+ matrix.reinit(all_dofs, all_dofs, sparsity_pattern, PETSC_COMM_WORLD);
for (unsigned int i = 0; i < N; i++)
for (unsigned int j = 0; j < N; j++)
{
// matrix in a consecutive fashion, but rather according to the order of
// degrees of freedom in the sequence of cells that we traverse
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/sparsity_pattern.h>
#include <iostream>
void
test()
{
- const unsigned int N = 200;
+ const unsigned int N      = 200;
+ const unsigned int n_dofs = N * N;
+
+ DynamicSparsityPattern dsp(n_dofs, n_dofs);
+ // An older version of this test relied on PETSc allocating entries
+ // dynamically, but matrices now need their sparsity pattern up front, so
+ // the stencil is set up twice: once here for the pattern and once below
+ // when the entries are written
+ for (unsigned int i = 0; i < N; i++)
+ for (unsigned int j = 0; j < N; j++)
+ {
+ const unsigned int global = i * N + j;
+ dsp.add(global, global);
+ if (j > 0)
+ {
+ dsp.add(global - 1, global);
+ dsp.add(global, global - 1);
+ }
+ if (j < N - 1)
+ {
+ dsp.add(global + 1, global);
+ dsp.add(global, global + 1);
+ }
+ if (i > 0)
+ {
+ dsp.add(global - N, global);
+ dsp.add(global, global - N);
+ }
+ if (i < N - 1)
+ {
+ dsp.add(global + N, global);
+ dsp.add(global, global + N);
+ }
+ }
+
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ IndexSet all_dofs(n_dofs);
+ all_dofs.add_range(0, n_dofs);
+
+ PETScWrappers::MPI::SparseMatrix matrix;
+ matrix.reinit(all_dofs, all_dofs, sparsity_pattern, PETSC_COMM_WORLD);
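+ // because the pattern above already holds every five-point coupling, the
+ // randomized order in which entries are written below cannot trigger any
+ // new allocation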
// first find a random permutation of the
// indices
}
// build the sparse matrix
- PETScWrappers::MPI::SparseMatrix matrix(
- PETSC_COMM_WORLD, N * N, N * N, N * N, N * N, 5);
for (unsigned int i_ = 0; i_ < N; i_++)
for (unsigned int j_ = 0; j_ < N; j_++)
{
-// check SparseMatrix::add(other, factor)
+// check SparseMatrix::add(factor, other)
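+// the convention checked here: m.add(factor, other) computes m += factor * other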
#include <deal.II/lac/petsc_sparse_matrix.h>
for (unsigned int i = 0; i < s; ++i)
  deallog << m(i, i) << " ";
deallog << std::endl;
- m.add(m2, 1.0);
+ m.add(1.0, m2);
deallog << "after: " << m(0, 1) << std::endl;
for (unsigned int i = 0; i < s; ++i)
deallog << m(i, i) << " ";
deallog << std::endl;
- m.add(m2, -1.0);
+ m.add(-1.0, m2);
deallog << "back to original: " << m(0, 1) << std::endl;
for (unsigned int i = 0; i < s; ++i)