# include <dmumps_c.h>
#endif
+#ifdef DEAL_II_WITH_TRILINOS
+# include <deal.II/lac/trilinos_sparse_matrix.h>
+# include <deal.II/lac/trilinos_vector.h>
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+# include <deal.II/lac/petsc_sparse_matrix.h>
+# include <deal.II/lac/petsc_vector.h>
+#endif
+
DEAL_II_NAMESPACE_OPEN
namespace types
*/
using size_type = types::global_dof_index;
-
/**
- * The AdditionalData struct contains data for controlling the MUMPS
- * execution.
+   * This struct contains data for controlling the MUMPS execution.
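+   *
+   * A minimal usage sketch (all members as declared below; the communicator
+   * defaults to MPI_COMM_WORLD):
+   * @code
+   *   SparseDirectMUMPS::AdditionalData data;
+   *   data.symmetric         = true;
+   *   data.posdef            = true;
+   *   data.blr_factorization = true;
+   *   SparseDirectMUMPS solver(data);
+   * @endcode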
*/
struct AdditionalData
{
{}
/*
- * If true, the MUMPS solver will print out details of the execution.
+   * If true, the MUMPS solver will print out details of the execution.
*/
bool output_details;
/*
- * If true, the MUMPS solver will print out error statistics.
+   * If true, the MUMPS solver will print out error statistics.
*/
bool error_statistics;
/*
bool posdef;
/*
- * If true, the MUMPS solver will use the Block Low-Rank factorization.
- */
- bool blr_factorization;
-
- /*
- * Stores Block Low-Rank approximation settings to be used in MUMPS
- * factorization, if enabled.
+   * If true, the MUMPS solver will use the Block Low-Rank factorization.
*/
+  bool blr_factorization;
+
+  /*
+   * Stores Block Low-Rank approximation settings to be used in MUMPS
+   * factorization, if enabled.
+   */
BlockLowRank blr;
};
-
/**
- * Constructor, takes <tt>AdditionalData</tt> to control MUMPS execution.
+   * Constructor, takes an <tt>AdditionalData</tt> object to control the
+   * MUMPS execution and an MPI communicator that defaults to MPI_COMM_WORLD.
*/
SparseDirectMUMPS(const AdditionalData &additional_data = AdditionalData(),
const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
- * Destructor.
+   * Destructor.
*/
~SparseDirectMUMPS();
/**
- * Exception.
+   * Exception.
*/
DeclException0(ExcInitializeAlreadyCalled);
* vector <tt>src</tt> and the solution is written into the output vector
* <tt>dst</tt>.
*/
+ template <typename VectorType>
void
- vmult(Vector<double> &dst, const Vector<double> &src) const;
+ vmult(VectorType &dst, const VectorType &src) const;
+
/**
* A function in which the inverse of the transposed matrix is applied to the
* input vector <tt>src</tt> and the solution is written into the output
* vector <tt>dst</tt>.
*/
+ template <typename VectorType>
void
- Tvmult(Vector<double> &dst, const Vector<double> &src) const;
+  Tvmult(VectorType &dst, const VectorType &src) const;
/**
* A function that returns the ICNTL integer array from MUMPS.
* The ICNTL array contains integer control parameters for the MUMPS solver.
* Keep in mind that MUMPS is a fortran library and the documentation refers
- * to indices into this array starting from one rather than from zero. To
- * select the correct index one can use a macro like this:
- * `#define ICNTL(I) icntl[(I)-1]`. In the MUMPS documentation there is the
+   * to indices into this array starting from one rather than from zero. To
+   * select the correct index one can use a macro like this:
+   * `#define ICNTL(I) icntl[(I)-1]`. In the MUMPS documentation there is the
* description of each parameter of the array. Be aware that ownership of the
* array remains in the current class rather than with the caller of this
* function.
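+   * As a worked example, the ordering option ICNTL(7) from the MUMPS manual
+   * lives at position 6 of this zero-based array, i.e. it can be accessed as
+   * `icntl[6]`, or simply as `ICNTL(7)` with the macro above.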
*/
mutable std::vector<double> rhs;
+ /**
+ * Local to global index mapping for the right-hand side vector.
+ */
+ mutable std::vector<int> irhs_loc;
+
/**
* irn contains the row indices of the non-zero entries of the matrix.
*/
std::unique_ptr<types::mumps_index[]> irn;
/**
* jcn contains the column indices of the non-zero entries of the matrix.
*/
std::unique_ptr<types::mumps_index[]> jcn;
/**
* The number of rows of the matrix. The matrix is square.
types::global_dof_index n;
/**
- * The number of non-zero entries in the matrix.
+ * Number of non-zero entries in the matrix.
*/
types::mumps_nnz nnz;
/**
- * This function hands over to MUMPS the system's <tt>matrix</tt>.
+ * IndexSet storing the locally owned rows of the matrix.
+ */
+ IndexSet row_range;
+
+ /**
+ * This function initializes a MUMPS instance and hands over the system's
+ * matrix <tt>matrix</tt>.
*/
template <class Matrix>
void
SparseDirectMUMPS::initialize_matrix(const Matrix &matrix)
{
Assert(matrix.n() == matrix.m(), ExcMessage("Matrix needs to be square."));
- // Here we should be checking if the matrix is respecting the symmetry given
- //(I.E. sym = 0 for non-symmetric matrix, sym = 1 for posdef matrix, sym = 2
- // for general symmetric).
- // Hand over matrix to MUMPS as centralized assembled matrix
- if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+ n = matrix.n();
+ id.n = n;
+
+  if constexpr (std::is_same_v<SparseMatrix<double>, Matrix> ||
+                std::is_same_v<SparseMatrix<float>, Matrix> ||
+                std::is_same_v<BlockSparseMatrix<double>, Matrix>)
+ {
+ // Serial matrix: hand over matrix to MUMPS as centralized assembled
+ // matrix
+ if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+ {
+ // number of nonzero elements in matrix
+          nnz = matrix.n_actually_nonzero_elements();
+
+ // representation of the matrix
+ a = std::make_unique<double[]>(nnz);
+
+ // matrix indices pointing to the row and column dimensions
+ // respectively of the matrix representation above (a): ie. a[k] is
+ // the matrix element (irn[k], jcn[k])
+ irn = std::make_unique<MUMPS_INT[]>(nnz);
+ jcn = std::make_unique<MUMPS_INT[]>(nnz);
+
+ size_type n_non_zero_elements = 0;
+
+ // loop over the elements of the matrix row by row, as suggested in
+ // the documentation of the sparse matrix iterator class
+ if (additional_data.symmetric == true)
+ {
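+              // For a symmetric matrix MUMPS expects only one triangular
+              // part of the matrix, so only entries with column >= row are
+              // passed on below.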
+ for (size_type row = 0; row < matrix.m(); ++row)
+ {
+ for (typename Matrix::const_iterator ptr = matrix.begin(row);
+ ptr != matrix.end(row);
+ ++ptr)
+ if (std::abs(ptr->value()) > 0.0 && ptr->column() >= row)
+ {
+ a[n_non_zero_elements] = ptr->value();
+ irn[n_non_zero_elements] = row + 1;
+ jcn[n_non_zero_elements] = ptr->column() + 1;
+
+ ++n_non_zero_elements;
+ }
+ }
+ }
+ else
+ {
+ for (size_type row = 0; row < matrix.m(); ++row)
+ {
+ for (typename Matrix::const_iterator ptr = matrix.begin(row);
+ ptr != matrix.end(row);
+ ++ptr)
+ if (std::abs(ptr->value()) > 0.0)
+ {
+ a[n_non_zero_elements] = ptr->value();
+ irn[n_non_zero_elements] = row + 1;
+ jcn[n_non_zero_elements] = ptr->column() + 1;
+ ++n_non_zero_elements;
+ }
+ }
+ }
+ id.n = n;
+ id.nnz = n_non_zero_elements;
+ id.irn = irn.get();
+ id.jcn = jcn.get();
+ id.a = a.get();
+ }
+ }
+ else if constexpr (std::is_same_v<TrilinosWrappers::SparseMatrix, Matrix> ||
+ std::is_same_v<PETScWrappers::MPI::SparseMatrix, Matrix>)
{
- // Set number of unknowns
- n = matrix.n();
+ // Distributed matrix case
+ id.icntl[17] = 3; // distributed matrix assembly
+ id.nnz = matrix.n_nonzero_elements();
+ nnz = id.nnz;
+ size_type n_non_zero_local = 0;
- // number of nonzero elements in matrix
- nnz = matrix.n_actually_nonzero_elements();
+ // Get the range of rows owned by this process
+ row_range = matrix.locally_owned_range_indices();
+ size_type local_non_zeros = 0;
- // representation of the matrix
- a = std::make_unique<double[]>(nnz);
+ if constexpr (std::is_same_v<TrilinosWrappers::SparseMatrix, Matrix>)
+ {
+ const auto &trilinos_matrix = matrix.trilinos_matrix();
+ local_non_zeros = trilinos_matrix.NumMyNonzeros();
+ }
+ else if constexpr (std::is_same_v<PETScWrappers::MPI::SparseMatrix,
+ Matrix>)
+ {
+ Mat &petsc_matrix =
+ const_cast<PETScWrappers::MPI::SparseMatrix &>(matrix)
+ .petsc_matrix();
+ MatInfo info;
+ MatGetInfo(petsc_matrix, MAT_LOCAL, &info);
+ local_non_zeros = (PetscInt)info.nz_used;
+ }
- // matrix indices pointing to the row and column dimensions
- // respectively of the matrix representation above (a): ie. a[k] is
- // the matrix element (irn[k], jcn[k])
- irn = std::make_unique<MUMPS_INT[]>(nnz);
- jcn = std::make_unique<MUMPS_INT[]>(nnz);
- size_type n_non_zero_elements = 0;
+ irn = std::make_unique<MUMPS_INT[]>(local_non_zeros);
+ jcn = std::make_unique<MUMPS_INT[]>(local_non_zeros);
+ a = std::make_unique<double[]>(local_non_zeros);
+ irhs_loc.resize(row_range.n_elements());
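+
+      // MUMPS' distributed right-hand side interface needs, for every
+      // locally stored entry of the right-hand side, its global (1-based)
+      // row index; these indices are collected in irhs_loc in the loops
+      // below and handed over as id.irhs_loc.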
- // loop over the elements of the matrix row by row, as suggested in
- // the documentation of the sparse matrix iterator class
if (additional_data.symmetric == true)
{
- for (size_type row = 0; row < matrix.m(); ++row)
+ for (const auto &row : row_range)
{
- for (typename Matrix::const_iterator ptr = matrix.begin(row);
- ptr != matrix.end(row);
- ++ptr)
- if (std::abs(ptr->value()) > 0.0 && ptr->column() >= row)
+ for (auto it = matrix.begin(row); it != matrix.end(row); ++it)
+ if (std::abs(it->value()) > 0.0 && it->column() >= row)
{
- a[n_non_zero_elements] = ptr->value();
- irn[n_non_zero_elements] = row + 1;
- jcn[n_non_zero_elements] = ptr->column() + 1;
+ const int global_row = row + 1;
+ const int global_col = it->column() + 1;
- ++n_non_zero_elements;
+ // Store this non-zero entry
+ irn[n_non_zero_local] = global_row;
+ jcn[n_non_zero_local] = global_col;
+ a[n_non_zero_local] = it->value();
+
+ // Count local non-zeros
+ n_non_zero_local++;
}
+
+              const types::global_dof_index local_index =
+ row_range.index_within_set(row);
+ irhs_loc[local_index] = row + 1;
}
}
else
{
- for (size_type row = 0; row < matrix.m(); ++row)
+ for (const auto &row : row_range)
{
- for (typename Matrix::const_iterator ptr = matrix.begin(row);
- ptr != matrix.end(row);
- ++ptr)
- if (std::abs(ptr->value()) > 0.0)
- {
- a[n_non_zero_elements] = ptr->value();
- irn[n_non_zero_elements] = row + 1;
- jcn[n_non_zero_elements] = ptr->column() + 1;
- ++n_non_zero_elements;
- }
+ // Loop over columns
+ for (auto it = matrix.begin(row); it != matrix.end(row); ++it)
+ {
+ if (std::abs(it->value()) > 0.0)
+ {
+ const int global_row = row + 1;
+ const int global_col = it->column() + 1;
+
+ irn[n_non_zero_local] = global_row;
+ jcn[n_non_zero_local] = global_col;
+ a[n_non_zero_local] = it->value();
+
+ // Count local non-zeros
+ n_non_zero_local++;
+ }
+ }
+
+              const types::global_dof_index local_index =
+ row_range.index_within_set(row);
+ irhs_loc[local_index] = row + 1;
}
}
- id.n = n;
- id.nnz = n_non_zero_elements;
- id.irn = irn.get();
- id.jcn = jcn.get();
- id.a = a.get();
+
+ // Hand over local arrays to MUMPS
+ id.nnz_loc = n_non_zero_local;
+ id.irn_loc = irn.get();
+ id.jcn_loc = jcn.get();
+ id.a_loc = a.get();
+ id.irhs_loc = irhs_loc.data();
+ }
+ else
+ {
+ DEAL_II_NOT_IMPLEMENTED();
}
}
dmumps_c(&id);
}
+
+
+template <typename VectorType>
void
-SparseDirectMUMPS::vmult(Vector<double> &dst, const Vector<double> &src) const
+SparseDirectMUMPS::vmult(VectorType &dst, const VectorType &src) const
{
- // and that the matrix has at least one nonzero element:
+ // Check that the matrix has at least one nonzero element:
Assert(nnz != 0, ExcNotInitialized());
Assert(n == dst.size(), ExcMessage("Destination vector has the wrong size."));
Assert(n == src.size(), ExcMessage("Source vector has the wrong size."));
- // Hand over right-hand side
- copy_rhs_to_mumps(src);
- // Start solver
- id.job = 3;
- dmumps_c(&id);
- copy_solution(dst);
-}
+ if constexpr (std::is_same_v<VectorType, Vector<double>>)
+ {
+ // Centralized assembly for serial vectors.
+
+ // Hand over right-hand side
+ copy_rhs_to_mumps(src);
+
+ // Start solver
+ id.job = 3;
+ dmumps_c(&id);
+ copy_solution(dst);
+ }
+ else if constexpr (std::is_same_v<VectorType,
+ TrilinosWrappers::MPI::Vector> ||
+ std::is_same_v<VectorType, PETScWrappers::MPI::Vector>)
+ {
+ id.icntl[19] = 10; // distributed rhs
+ id.icntl[20] = 0; // centralized solution, stored on rank 0 by MUMPS
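+      // One right-hand side; lrhs_loc is the leading dimension of the local
+      // right-hand side array and nloc_rhs the number of locally stored
+      // right-hand side entries.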
+ id.nrhs = 1;
+ id.lrhs_loc = n;
+ id.nloc_rhs = row_range.n_elements();
+
+
+      // Hand over the locally owned part of the right-hand side. For PETSc
+      // vectors, the raw array obtained through VecGetArray() has to stay
+      // valid while MUMPS uses it, so it is only restored after the solve
+      // further down.
+      [[maybe_unused]] PetscScalar *petsc_local_array = nullptr;
+      if constexpr (std::is_same_v<VectorType, TrilinosWrappers::MPI::Vector>)
+        id.rhs_loc = const_cast<double *>(src.begin());
+      else if constexpr (std::is_same_v<VectorType, PETScWrappers::MPI::Vector>)
+        {
+          VecGetArray(
+            const_cast<PETScWrappers::MPI::Vector &>(src).petsc_vector(),
+            &petsc_local_array);
+          id.rhs_loc = petsc_local_array;
+        }
+
+      if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+        {
+          rhs.resize(id.lrhs_loc);
+          id.rhs = rhs.data();
+        }
+
+      // Start solver
+      id.job = 3;
+      dmumps_c(&id);
+
+      if constexpr (std::is_same_v<VectorType, PETScWrappers::MPI::Vector>)
+        VecRestoreArray(
+          const_cast<PETScWrappers::MPI::Vector &>(src).petsc_vector(),
+          &petsc_local_array);
+
+ // Copy solution into the given vector
+ // For MUMPS with centralized solution (icntl[20]=0), the solution is only
+ // on the root process (0) and needs to be distributed to all processes
+ if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+ {
+ // Set all the values in the dst vector
+ for (size_type i = 0; i < n; ++i)
+ dst[i] = rhs[i];
+ }
+ // Ensure the distributed vector has the correct values everywhere
+ dst.compress(VectorOperation::insert);
+
+ rhs.resize(0); // remove rhs again
+ }
+}
+
+
+template <typename VectorType>
void
-SparseDirectMUMPS::Tvmult(Vector<double> &dst, const Vector<double> &src) const
+SparseDirectMUMPS::Tvmult(VectorType &dst, const VectorType &src) const
{
// The matrix has at least one nonzero element:
Assert(nnz != 0, ExcNotInitialized());
-
Assert(n == dst.size(), ExcMessage("Destination vector has the wrong size."));
Assert(n == src.size(), ExcMessage("Source vector has the wrong size."));
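+  // icntl[8] is ICNTL(9) in MUMPS' one-based numbering: any value other
+  // than 1 makes MUMPS solve the transposed system A^T x = b.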
id.icntl[8] = 2; // transpose
- // Hand over right-hand side
- copy_rhs_to_mumps(src);
-
- // Start solver
- id.job = 3;
- dmumps_c(&id);
- copy_solution(dst);
+ vmult(dst, src);
id.icntl[8] = 1; // reset to default
}
// explicit instantiations for SparseDirectMUMPS
#ifdef DEAL_II_WITH_MUMPS
+
+# define InstantiateMUMPSMatVec(VECTOR) \
+ template void SparseDirectMUMPS::vmult(VECTOR &, const VECTOR &) const; \
+ template void SparseDirectMUMPS::Tvmult(VECTOR &, const VECTOR &) const;
+
+InstantiateMUMPSMatVec(TrilinosWrappers::MPI::Vector)
+ InstantiateMUMPSMatVec(PETScWrappers::MPI::Vector)
+ InstantiateMUMPSMatVec(Vector<double>)
+
# define InstantiateMUMPS(MATRIX) \
template void SparseDirectMUMPS::initialize(const MATRIX &);
-InstantiateMUMPS(SparseMatrix<double>) InstantiateMUMPS(SparseMatrix<float>)
+ InstantiateMUMPS(SparseMatrix<double>)
+ InstantiateMUMPS(SparseMatrix<float>)
+ InstantiateMUMPS(TrilinosWrappers::SparseMatrix)
+ InstantiateMUMPS(PETScWrappers::SparseMatrix)
+ InstantiateMUMPS(PETScWrappers::MPI::SparseMatrix)
// InstantiateMUMPS(SparseMatrixEZ<double>)
// InstantiateMUMPS(SparseMatrixEZ<float>)
InstantiateMUMPS(BlockSparseMatrix<double>)
--- /dev/null
+// ------------------------------------------------------------------------
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+// Copyright (C) 2001 - 2024 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// Part of the source code is dual licensed under Apache-2.0 WITH
+// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
+// governing the source code and code contributions can be found in
+// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
+//
+// ------------------------------------------------------------------------
+
+// test the MUMPS sparse direct solver in parallel on a
+// TrilinosWrappers::SparseMatrix and a PETScWrappers::MPI::SparseMatrix
+
+#include <deal.II/base/function.h>
+#include <deal.II/base/quadrature_lib.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/sparse_direct.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/matrix_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+
+#include "../tests.h"
+
+template <typename MatrixType, typename VectorType>
+void
+solve_and_check(const MatrixType &M,
+ const VectorType &rhs,
+ const VectorType &solution)
+{
+ SparseDirectMUMPS::AdditionalData data;
+ data.output_details = false;
+ data.symmetric = true;
+ data.posdef = true;
+ SparseDirectMUMPS solver(data);
+ solver.initialize(M);
+ VectorType dst(rhs);
+ solver.vmult(dst, rhs);
+ dst -= solution;
+ Assert(dst.l2_norm() < 1e-8, ExcInternalError());
+}
+
+template <int dim, typename MatrixType, typename VectorType>
+void
+test()
+{
+ deallog << dim << 'd' << std::endl;
+
+ MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+
+ parallel::distributed::Triangulation<dim> tria(mpi_communicator);
+ GridGenerator::hyper_cube(tria, -1, 1);
+ tria.refine_global(1);
+
+ // destroy the uniformity of the matrix by
+ // refining one cell
+ tria.begin_active()->set_refine_flag();
+ tria.execute_coarsening_and_refinement();
+ tria.refine_global(8 - 2 * dim);
+
+ FE_Q<dim> fe(1);
+ DoFHandler<dim> dof_handler(tria);
+ dof_handler.distribute_dofs(fe);
+
+ deallog << "Number of dofs = " << dof_handler.n_dofs() << std::endl;
+
+ auto locally_relevant_dofs =
+ DoFTools::extract_locally_relevant_dofs(dof_handler);
+ auto locally_owned_dofs = dof_handler.locally_owned_dofs();
+ DynamicSparsityPattern dsp(locally_relevant_dofs);
+ AffineConstraints<double> constraints;
+
+ DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ locally_owned_dofs,
+ mpi_communicator,
+ locally_relevant_dofs);
+
+ MatrixType B;
+ B.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
+
+ QGauss<dim> qr(2);
+ MatrixTools::create_mass_matrix(dof_handler, qr, B);
+ B.compress(VectorOperation::add);
+
+ SparseDirectMUMPS::AdditionalData data;
+ data.output_details = true;
+ data.error_statistics = true;
+ SparseDirectMUMPS Binv(data);
+ Binv.initialize(B);
+
+ // for a number of different solution
+ // vectors, make up a matching rhs vector
+ // and check what the solver finds
+ for (unsigned int i = 0; i < 3; ++i)
+ {
+ VectorType solution(dof_handler.locally_owned_dofs(), mpi_communicator);
+ VectorType x(dof_handler.locally_owned_dofs(), mpi_communicator);
+ VectorType b(dof_handler.locally_owned_dofs(), mpi_communicator);
+
+ for (const types::global_dof_index idx : dof_handler.locally_owned_dofs())
+ solution(idx) = idx + idx * (i + 1) * (i + 1);
+
+ solution.compress(VectorOperation::insert);
+
+ B.vmult(b, solution);
+
+ Binv.vmult(x, b);
+
+ x -= solution;
+ deallog << "relative norm distance = " << x.l2_norm() / solution.l2_norm()
+ << std::endl;
+ deallog << "absolute norms = " << x.l2_norm() << ' ' << solution.l2_norm()
+ << std::endl;
+ Assert(x.l2_norm() / solution.l2_norm() < 1e-8, ExcInternalError());
+
+ // check with also the posdef option
+ solve_and_check<MatrixType, VectorType>(B, b, solution);
+ }
+}
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, numbers::invalid_unsigned_int);
+
+ MPILogInitAll log(true);
+
+ deallog << "Trilinos matrices" << std::endl;
+ test<2, TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector>();
+ test<3, TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector>();
+
+
+ deallog << "PETSc matrices" << std::endl;
+ test<2, PETScWrappers::MPI::SparseMatrix, PETScWrappers::MPI::Vector>();
+ test<3, PETScWrappers::MPI::SparseMatrix, PETScWrappers::MPI::Vector>();
+}
--- /dev/null
+
+DEAL:0::Trilinos matrices
+DEAL:0::2d
+DEAL:0::Number of dofs = 1889
+DEAL:0::relative norm distance = 4.64308e-16
+DEAL:0::absolute norms = 4.39998e-11 94764.3
+DEAL:0::relative norm distance = 4.65027e-16
+DEAL:0::absolute norms = 1.10170e-10 236911.
+DEAL:0::relative norm distance = 4.65027e-16
+DEAL:0::absolute norms = 2.20340e-10 473822.
+DEAL:0::3d
+DEAL:0::Number of dofs = 1333
+DEAL:0::relative norm distance = 1.48787e-15
+DEAL:0::absolute norms = 8.35670e-11 56165.6
+DEAL:0::relative norm distance = 1.29809e-15
+DEAL:0::absolute norms = 1.82269e-10 140414.
+DEAL:0::relative norm distance = 1.29623e-15
+DEAL:0::absolute norms = 3.64018e-10 280828.
+DEAL:0::PETSc matrices
+DEAL:0::2d
+DEAL:0::Number of dofs = 1889
+DEAL:0::relative norm distance = 4.02650e-16
+DEAL:0::absolute norms = 3.81568e-11 94764.3
+DEAL:0::relative norm distance = 4.10886e-16
+DEAL:0::absolute norms = 9.73433e-11 236911.
+DEAL:0::relative norm distance = 4.10886e-16
+DEAL:0::absolute norms = 1.94687e-10 473822.
+DEAL:0::3d
+DEAL:0::Number of dofs = 1333
+DEAL:0::relative norm distance = 9.32100e-16
+DEAL:0::absolute norms = 5.23519e-11 56165.6
+DEAL:0::relative norm distance = 9.01192e-16
+DEAL:0::absolute norms = 1.26540e-10 140414.
+DEAL:0::relative norm distance = 9.01192e-16
+DEAL:0::absolute norms = 2.53080e-10 280828.
+
+DEAL:1::Trilinos matrices
+DEAL:1::2d
+DEAL:1::Number of dofs = 1889
+DEAL:1::relative norm distance = 4.64308e-16
+DEAL:1::absolute norms = 4.39998e-11 94764.3
+DEAL:1::relative norm distance = 4.65027e-16
+DEAL:1::absolute norms = 1.10170e-10 236911.
+DEAL:1::relative norm distance = 4.65027e-16
+DEAL:1::absolute norms = 2.20340e-10 473822.
+DEAL:1::3d
+DEAL:1::Number of dofs = 1333
+DEAL:1::relative norm distance = 1.48787e-15
+DEAL:1::absolute norms = 8.35670e-11 56165.6
+DEAL:1::relative norm distance = 1.29809e-15
+DEAL:1::absolute norms = 1.82269e-10 140414.
+DEAL:1::relative norm distance = 1.29623e-15
+DEAL:1::absolute norms = 3.64018e-10 280828.
+DEAL:1::PETSc matrices
+DEAL:1::2d
+DEAL:1::Number of dofs = 1889
+DEAL:1::relative norm distance = 4.02650e-16
+DEAL:1::absolute norms = 3.81568e-11 94764.3
+DEAL:1::relative norm distance = 4.10886e-16
+DEAL:1::absolute norms = 9.73433e-11 236911.
+DEAL:1::relative norm distance = 4.10886e-16
+DEAL:1::absolute norms = 1.94687e-10 473822.
+DEAL:1::3d
+DEAL:1::Number of dofs = 1333
+DEAL:1::relative norm distance = 9.32100e-16
+DEAL:1::absolute norms = 5.23519e-11 56165.6
+DEAL:1::relative norm distance = 9.01192e-16
+DEAL:1::absolute norms = 1.26540e-10 140414.
+DEAL:1::relative norm distance = 9.01192e-16
+DEAL:1::absolute norms = 2.53080e-10 280828.
+