From: Timo Heister
Date: Tue, 13 Sep 2016 13:42:54 +0000 (-0400)
Subject: introduce MGCoarseGridIterativeSolver, deprecate MGCoarseGridLACIteration
X-Git-Tag: v8.5.0-rc1~668^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=96357301ed988694ed76735d79730c3ab7f830da;p=dealii.git
introduce MGCoarseGridIterativeSolver, deprecate MGCoarseGridLACIteration
- introduce MGCoarseGridIterativeSolver with a simpler interface
- also works in parallel with Trilinos solvers
- add tests
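
For example (usage taken from the CG(ILU) variant in the new test, where
LA is dealii::LinearAlgebraTrilinos and coarse_matrix is the level-0
matrix):

  SolverControl coarse_solver_control (1000, 1e-8, false, false);
  SolverCG<LA::MPI::Vector> coarse_solver (coarse_solver_control);
  LA::MPI::PreconditionILU prec;
  prec.initialize (coarse_matrix);

  MGCoarseGridIterativeSolver<LA::MPI::Vector,
                              SolverCG<LA::MPI::Vector>,
                              LA::MPI::SparseMatrix,
                              LA::MPI::PreconditionILU>
    coarse_grid_solver (coarse_solver, coarse_matrix, prec);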
---
diff --git a/doc/news/changes.h b/doc/news/changes.h
index 61dcb348ac..79bb0c4353 100644
--- a/doc/news/changes.h
+++ b/doc/news/changes.h
@@ -38,6 +38,13 @@ inconvenience this causes.
+
+ - Deprecated: The class MGCoarseGridLACIteration has been deprecated in
+   favor of the new class MGCoarseGridIterativeSolver.
+
+ (Timo Heister, 2016/09/14)
+
+
- Changed: The template parameter order in many VectorTools functions is now
different; this was done so that the order is the same across similar functions.
This will only affect code that explicitly specifies template parameters for
@@ -522,11 +529,18 @@ inconvenience this causes.
Specific improvements
+
- New: LinearAlgebra::Vector is now instantiated for float and double.
(Bruno Turcksin, 2016/09/15)
+ - New: The new class MGCoarseGridIterativeSolver replaces
+   MGCoarseGridLACIteration with a simpler interface and also works in
+   parallel with Trilinos solvers.
+
+ (Timo Heister, 2016/09/14)
+
+
- Improved: FEValues no longer generates the mapping's internal database if
the mapping will not be required for the set of update flags specified.
diff --git a/include/deal.II/multigrid/mg_coarse.h b/include/deal.II/multigrid/mg_coarse.h
index 895758960c..4e8c77d1e8 100644
--- a/include/deal.II/multigrid/mg_coarse.h
+++ b/include/deal.II/multigrid/mg_coarse.h
@@ -76,6 +76,8 @@ private:
* be derived from @p Subscriptor to allow for the use of a smart pointer to
* it.
*
+ * @deprecated Use MGCoarseGridIterativeSolver instead.
+ *
* @author Guido Kanschat, 1999, Ralf Hartmann, 2002.
*/
template<typename SolverType, class VectorType = Vector<double> >
@@ -144,10 +146,88 @@ private:
* Reference to the preconditioner.
*/
PointerMatrixBase<VectorType> *precondition;
+
+} DEAL_II_DEPRECATED;
+
+
+
+/**
+ * Coarse grid multigrid operator for an iterative solver.
+ *
+ * This class wraps a deal.II iterative solver with a given matrix and
+ * preconditioner so that the combination can be used as a coarse grid
+ * operator.
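+ *
+ * As an illustrative sketch (not taken from the library or the test
+ * suite; @p coarse_matrix stands for any matrix of a type compatible
+ * with the solver), a CG coarse grid solver without preconditioning can
+ * be set up as follows:
+ * @code
+ * SolverControl coarse_control (1000, 1e-10, false, false);
+ * SolverCG<Vector<double> > coarse_solver (coarse_control);
+ * PreconditionIdentity id;
+ *
+ * MGCoarseGridIterativeSolver<Vector<double>,
+ *                             SolverCG<Vector<double> >,
+ *                             SparseMatrix<double>,
+ *                             PreconditionIdentity>
+ *   coarse_grid_solver (coarse_solver, coarse_matrix, id);
+ * @endcode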
+ */
+template <class VectorType,
+          class SolverType,
+          class MatrixType,
+          class PreconditionerType>
+class MGCoarseGridIterativeSolver : public MGCoarseGridBase<VectorType>
+{
+public:
+ /**
+ * Default constructor.
+ */
+ MGCoarseGridIterativeSolver ();
+
+ /**
+   * Constructor. Only references to these objects are stored, so their
+   * lifetime must exceed the usage of this class.
+   */
+  MGCoarseGridIterativeSolver (SolverType               &solver,
+                               const MatrixType         &matrix,
+                               const PreconditionerType &preconditioner);
+
+ /**
+ * Initialize with new data, see the corresponding constructor for more
+ * details.
+ */
+  void initialize (SolverType               &solver,
+                   const MatrixType         &matrix,
+                   const PreconditionerType &preconditioner);
+
+ /**
+ * Clear all pointers.
+ */
+ void clear ();
+
+ /**
+ * Implementation of the abstract function. Calls the solve method of the
+ * given solver with matrix, vectors, and preconditioner.
+ */
+ virtual void operator() (const unsigned int level,
+ VectorType &dst,
+ const VectorType &src) const;
+
+private:
+ /**
+ * Reference to the solver.
+ */
+  SmartPointer<SolverType,
+               MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType> > solver;
+
+ /**
+ * Reference to the matrix.
+ */
+  SmartPointer<const MatrixType,
+               MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType> > matrix;
+
+ /**
+ * Reference to the preconditioner.
+ */
+  SmartPointer<const PreconditionerType,
+               MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType> > preconditioner;
+
};
+
/**
* Coarse grid solver by QR factorization implemented in the class
* Householder.
@@ -351,7 +431,90 @@ MGCoarseGridLACIteration
matrix = new PointerMatrix<MatrixType, VectorType>(&m);
}
-//---------------------------------------------------------------------------
+/* ------------------ Functions for MGCoarseGridIterativeSolver ------------ */
+
+template <class VectorType,
+          class SolverType,
+          class MatrixType,
+          class PreconditionerType>
+MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType>
+::MGCoarseGridIterativeSolver ()
+ :
+ solver(0, typeid(*this).name()),
+ matrix(0, typeid(*this).name()),
+ preconditioner(0, typeid(*this).name())
+{}
+
+
+
+template <class VectorType,
+          class SolverType,
+          class MatrixType,
+          class PreconditionerType>
+MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType>
+::MGCoarseGridIterativeSolver (SolverType &solver,
+ const MatrixType &matrix,
+ const PreconditionerType &preconditioner)
+ :
+ solver (&solver, typeid(*this).name()),
+ matrix (&matrix, typeid(*this).name()),
+ preconditioner (&preconditioner, typeid(*this).name())
+{}
+
+
+
+template <class VectorType,
+          class SolverType,
+          class MatrixType,
+          class PreconditionerType>
+void
+MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType>
+::initialize (SolverType &solver_,
+ const MatrixType &matrix_,
+ const PreconditionerType &preconditioner_)
+{
+ solver = &solver_;
+ matrix = &matrix_;
+ preconditioner = &preconditioner_;
+}
+
+
+
+template <class VectorType,
+          class SolverType,
+          class MatrixType,
+          class PreconditionerType>
+void
+MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType>
+::clear ()
+{
+ solver = 0;
+ matrix = 0;
+ preconditioner = 0;
+}
+
+
+
+template <class VectorType,
+          class SolverType,
+          class MatrixType,
+          class PreconditionerType>
+void
+MGCoarseGridIterativeSolver<VectorType, SolverType, MatrixType, PreconditionerType>
+::operator() (const unsigned int /*level*/,
+ VectorType &dst,
+ const VectorType &src) const
+{
+ Assert(solver!=0, ExcNotInitialized());
+ Assert(matrix!=0, ExcNotInitialized());
+ Assert(preconditioner!=0, ExcNotInitialized());
+ solver->solve(*matrix, dst, src, *preconditioner);
+}
+
+
+
+/* ------------------ Functions for MGCoarseGridHouseholder ------------ */
template<typename number, class VectorType>
MGCoarseGridHouseholder<number, VectorType>::MGCoarseGridHouseholder
diff --git a/tests/multigrid/mg_coarse_01.cc b/tests/multigrid/mg_coarse_01.cc
new file mode 100644
index 0000000000..680608db8d
--- /dev/null
+++ b/tests/multigrid/mg_coarse_01.cc
@@ -0,0 +1,675 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2016 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Test different coarse grid solvers in parallel (based on step-50)
+ */
+
+#include "../tests.h"
+
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_refinement.h>
+#include <deal.II/grid/tria_boundary_lib.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+
+#include <deal.II/multigrid/mg_constrained_dofs.h>
+#include <deal.II/multigrid/multigrid.h>
+#include <deal.II/multigrid/mg_transfer.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_coarse.h>
+#include <deal.II/multigrid/mg_smoother.h>
+#include <deal.II/multigrid/mg_matrix.h>
+
+
+#include <deal.II/lac/generic_linear_algebra.h>
+
+namespace LA
+{
+ using namespace dealii::LinearAlgebraTrilinos;
+}
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+namespace Step50
+{
+ using namespace dealii;
+
+  template <int dim>
+ class LaplaceProblem
+ {
+ public:
+ LaplaceProblem (const unsigned int deg);
+ void run ();
+
+ private:
+ typedef LA::MPI::SparseMatrix matrix_t;
+ typedef LA::MPI::Vector vector_t;
+
+ void setup_system ();
+ void assemble_system ();
+ void assemble_multigrid ();
+    void vcycle (const MGCoarseGridBase<vector_t> &coarse_grid_solver);
+
+ void solve ();
+
+ void refine_grid ();
+ void output_results (const unsigned int cycle) const;
+
+ ConditionalOStream pcout;
+
+    parallel::distributed::Triangulation<dim> triangulation;
+    FE_Q<dim>                                 fe;
+    DoFHandler<dim>                           mg_dof_handler;
+
+
+ matrix_t system_matrix;
+
+ IndexSet locally_relevant_set;
+
+ ConstraintMatrix hanging_node_constraints;
+ ConstraintMatrix constraints;
+
+ vector_t solution;
+ vector_t system_rhs;
+
+ const unsigned int degree;
+
+    MGLevelObject<matrix_t> mg_matrices;
+    MGLevelObject<matrix_t> mg_interface_matrices;
+
+ MGConstrainedDoFs mg_constrained_dofs;
+ int K;
+ };
+
+  template <int dim>
+  class Coefficient : public Function<dim>
+ {
+ public:
+ Coefficient (int K)
+      : Function<dim>(), K(K) {}
+
+    virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+
+    virtual void value_list (const std::vector<Point<dim> > &points,
+                             std::vector<double>            &values,
+ const unsigned int component = 0) const;
+ private:
+ int K;
+ };
+
+
+
+  template <int dim>
+  double Coefficient<dim>::value (const Point<dim> &p,
+ const unsigned int) const
+ {
+    return 1.0;
+
+    // dead code (unreachable): a coefficient oscillating with frequency K
+    // in each coordinate direction
+    double r = 1.0;
+    for (int d=0; d<dim; ++d)
+      r *= 2.0 + std::sin(2.0*numbers::PI*K*p[d]);
+    return r;
+  }
+
+
+  template <int dim>
+  void Coefficient<dim>::value_list (const std::vector<Point<dim> > &points,
+                                     std::vector<double>            &values,
+                                     const unsigned int              component) const
+ {
+ const unsigned int n_points = points.size();
+
+ Assert (values.size() == n_points,
+ ExcDimensionMismatch (values.size(), n_points));
+
+ Assert (component == 0,
+ ExcIndexRange (component, 0, 1));
+
+    for (unsigned int i=0; i<n_points; ++i)
+      values[i] = Coefficient<dim>::value (points[i], component);
+ }
+
+
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem (const unsigned int degree)
+ :
+ pcout (std::cout,
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)
+ == 0)),
+    triangulation (MPI_COMM_WORLD, Triangulation<dim>::
+                   limit_level_difference_at_vertices,
+                   parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy),
+ fe (degree),
+ mg_dof_handler (triangulation),
+ degree(degree),
+ K (3)
+ {}
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::setup_system ()
+ {
+ mg_dof_handler.distribute_dofs (fe);
+ mg_dof_handler.distribute_mg_dofs (fe);
+
+ DoFTools::extract_locally_relevant_dofs (mg_dof_handler,
+ locally_relevant_set);
+
+ solution.reinit(mg_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD);
+ system_rhs.reinit(mg_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD);
+ constraints.reinit (locally_relevant_set);
+ hanging_node_constraints.reinit (locally_relevant_set);
+ DoFTools::make_hanging_node_constraints (mg_dof_handler, hanging_node_constraints);
+ DoFTools::make_hanging_node_constraints (mg_dof_handler, constraints);
+
+    typename FunctionMap<dim>::type dirichlet_boundary;
+    ConstantFunction<dim>           homogeneous_dirichlet_bc (0.0);
+ dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
+ VectorTools::interpolate_boundary_values (mg_dof_handler,
+ dirichlet_boundary,
+ constraints);
+ constraints.close ();
+ hanging_node_constraints.close ();
+
+ DynamicSparsityPattern dsp(mg_dof_handler.n_dofs(), mg_dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (mg_dof_handler, dsp, constraints);
+ system_matrix.reinit (mg_dof_handler.locally_owned_dofs(), dsp, MPI_COMM_WORLD, true);
+
+
+ mg_constrained_dofs.clear();
+ mg_constrained_dofs.initialize(mg_dof_handler, dirichlet_boundary);
+
+ const unsigned int n_levels = triangulation.n_global_levels();
+
+ mg_interface_matrices.resize(0, n_levels-1);
+ mg_interface_matrices.clear_elements ();
+ mg_matrices.resize(0, n_levels-1);
+ mg_matrices.clear_elements ();
+
+    for (unsigned int level=0; level<n_levels; ++level)
+      {
+        DynamicSparsityPattern dsp(mg_dof_handler.n_dofs(level),
+                                   mg_dof_handler.n_dofs(level));
+        MGTools::make_sparsity_pattern(mg_dof_handler, dsp, level);
+
+        mg_matrices[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
+                                  mg_dof_handler.locally_owned_mg_dofs(level),
+                                  dsp, MPI_COMM_WORLD, true);
+        mg_interface_matrices[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
+                                            mg_dof_handler.locally_owned_mg_dofs(level),
+                                            dsp, MPI_COMM_WORLD, true);
+      }
+  }
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_system ()
+ {
+    const QGauss<dim> quadrature_formula(degree+1);
+
+    FEValues<dim> fe_values (fe, quadrature_formula,
+                             update_values | update_gradients |
+                             update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+    FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+    Vector<double>     cell_rhs (dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+    const Coefficient<dim> coefficient(K);
+    std::vector<double>    coefficient_values (n_q_points);
+
+    typename DoFHandler<dim>::active_cell_iterator
+ cell = mg_dof_handler.begin_active(),
+ endc = mg_dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->is_locally_owned())
+ {
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ fe_values.reinit (cell);
+
+ coefficient.value_list (fe_values.get_quadrature_points(),
+ coefficient_values);
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            for (unsigned int i=0; i<dofs_per_cell; ++i)
+              {
+                for (unsigned int j=0; j<dofs_per_cell; ++j)
+                  cell_matrix(i,j) += (coefficient_values[q_point] *
+                                       fe_values.shape_grad(i,q_point) *
+                                       fe_values.shape_grad(j,q_point) *
+                                       fe_values.JxW(q_point));
+
+                // constant right hand side
+                cell_rhs(i) += (fe_values.shape_value(i,q_point) *
+                                1.0 *
+                                fe_values.JxW(q_point));
+              }
+
+          cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global (cell_matrix, cell_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+
+ system_matrix.compress(VectorOperation::add);
+ system_rhs.compress(VectorOperation::add);
+ }
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_multigrid ()
+ {
+    QGauss<dim> quadrature_formula(1+degree);
+
+    FEValues<dim> fe_values (fe, quadrature_formula,
+                             update_values | update_gradients |
+                             update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+    FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+    const Coefficient<dim> coefficient (K);
+    std::vector<double>    coefficient_values (n_q_points);
+
+    std::vector<ConstraintMatrix> boundary_constraints (triangulation.n_global_levels());
+    ConstraintMatrix empty_constraints;
+    for (unsigned int level=0; level<triangulation.n_global_levels(); ++level)
+      {
+        IndexSet dofset;
+        DoFTools::extract_locally_relevant_level_dofs (mg_dof_handler, level, dofset);
+        boundary_constraints[level].reinit(dofset);
+        boundary_constraints[level].add_lines (mg_constrained_dofs.get_refinement_edge_indices(level));
+        boundary_constraints[level].add_lines (mg_constrained_dofs.get_boundary_indices(level));
+        boundary_constraints[level].close ();
+      }
+
+    typename DoFHandler<dim>::cell_iterator cell = mg_dof_handler.begin(),
+                                            endc = mg_dof_handler.end();
+
+ for (; cell!=endc; ++cell)
+ if (cell->level_subdomain_id()==triangulation.locally_owned_subdomain())
+ {
+ cell_matrix = 0;
+ fe_values.reinit (cell);
+
+ coefficient.value_list (fe_values.get_quadrature_points(),
+ coefficient_values);
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            for (unsigned int i=0; i<dofs_per_cell; ++i)
+              for (unsigned int j=0; j<dofs_per_cell; ++j)
+                cell_matrix(i,j) += (coefficient_values[q_point] *
+                                     fe_values.shape_grad(i,q_point) *
+                                     fe_values.shape_grad(j,q_point) *
+                                     fe_values.JxW(q_point));
+
+          cell->get_mg_dof_indices (local_dof_indices);
+
+ boundary_constraints[cell->level()]
+ .distribute_local_to_global (cell_matrix,
+ local_dof_indices,
+ mg_matrices[cell->level()]);
+ const IndexSet &interface_dofs_on_level
+ = mg_constrained_dofs.get_refinement_edge_indices(cell->level());
+ const unsigned int lvl = cell->level();
+
+
+          for (unsigned int i=0; i<dofs_per_cell; ++i)
+            for (unsigned int j=0; j<dofs_per_cell; ++j)
+              if (interface_dofs_on_level.is_element(local_dof_indices[i])      // at interface
+                  && !interface_dofs_on_level.is_element(local_dof_indices[j])) // not at interface
+                cell_matrix(i,j) = 0;
+
+          empty_constraints
+          .distribute_local_to_global (cell_matrix,
+                                       local_dof_indices,
+                                       mg_interface_matrices[cell->level()]);
+ }
+
+    for (unsigned int i=0; i<triangulation.n_global_levels(); ++i)
+      {
+        mg_matrices[i].compress(VectorOperation::add);
+        mg_interface_matrices[i].compress(VectorOperation::add);
+      }
+  }
+
+
+  template <class VECTOR>
+  class MGCoarseAMG : public MGCoarseGridBase<VECTOR>
+ {
+ public:
+ MGCoarseAMG(const LA::MPI::SparseMatrix &coarse_matrix,
+ const LA::MPI::PreconditionAMG::AdditionalData additional_data)
+ :
+ count(0)
+ {
+ precondition_amg.initialize(coarse_matrix, additional_data);
+ }
+
+ virtual void operator() (const unsigned int,
+ VECTOR &dst,
+ const VECTOR &src) const
+ {
+ ++count;
+ precondition_amg.vmult(dst, src);
+ }
+
+ mutable int count;
+
+ private:
+ LA::MPI::PreconditionAMG precondition_amg;
+ };
+
+  template <int dim>
+  void LaplaceProblem<dim>::vcycle (const MGCoarseGridBase<vector_t> &coarse_grid_solver)
+ {
+    MGTransferPrebuilt<vector_t> mg_transfer(hanging_node_constraints, mg_constrained_dofs);
+ mg_transfer.build_matrices(mg_dof_handler);
+
+ typedef LA::MPI::PreconditionJacobi Smoother;
+    MGSmootherPrecondition<matrix_t, Smoother, vector_t> mg_smoother;
+ mg_smoother.initialize(mg_matrices, Smoother::AdditionalData(0.5));
+ mg_smoother.set_steps(2);
+    mg::Matrix<vector_t> mg_matrix(mg_matrices);
+    mg::Matrix<vector_t> mg_interface_up(mg_interface_matrices);
+    mg::Matrix<vector_t> mg_interface_down(mg_interface_matrices);
+
+    Multigrid<vector_t> mg(mg_dof_handler,
+ mg_matrix,
+ coarse_grid_solver,
+ mg_transfer,
+ mg_smoother,
+ mg_smoother);
+
+ mg.set_edge_matrices(mg_interface_down, mg_interface_up);
+
+    PreconditionMG<dim, vector_t, MGTransferPrebuilt<vector_t> >
+ preconditioner(mg_dof_handler, mg, mg_transfer);
+
+ LA::MPI::Vector check1, check2, check3, tmp;
+ check1.reinit(mg_dof_handler.locally_owned_dofs(),MPI_COMM_WORLD);
+ check2=check1;
+ check3=check1;
+ tmp=check1;
+
+ check3 = 0.0;
+    // a weighted-Jacobi-like fixed point iteration
+ for (unsigned int i=0; i<3; ++i)
+ {
+ system_matrix.vmult(tmp, check3);
+ tmp *= -1.0;
+ tmp+=system_rhs;
+ preconditioner.vmult(check2, tmp);
+ check3.sadd(0.75, 0.25, check2);
+ double residual = system_matrix.residual(tmp, check3, system_rhs);
+ pcout << "check residual:" << residual << std::endl;
+ }
+ }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::solve ()
+  {
+    matrix_t &coarse_matrix = mg_matrices[0];
+    SolverControl coarse_solver_control (1000, 1e-8, false, false);
+    SolverCG<vector_t> coarse_solver(coarse_solver_control);
+
+ {
+ pcout << "CG(ID):" << std::endl;
+
+ PreconditionIdentity id;
+
+      MGCoarseGridIterativeSolver<vector_t,
+                                  SolverCG<vector_t>,
+                                  matrix_t,
+                                  PreconditionIdentity> coarse_grid_solver(coarse_solver,
+                                                                           coarse_matrix,
+                                                                           id);
+
+ vcycle(coarse_grid_solver);
+ pcout << "coarse iterations: " << coarse_solver_control.last_step() << std::endl;
+ }
+
+ {
+ pcout << "CG(ILU):" << std::endl;
+
+ LA::MPI::PreconditionILU prec;
+ prec.initialize(coarse_matrix);
+
+      MGCoarseGridIterativeSolver<vector_t,
+                                  SolverCG<vector_t>,
+                                  matrix_t,
+                                  LA::MPI::PreconditionILU> coarse_grid_solver(coarse_solver,
+                                                                               coarse_matrix,
+                                                                               prec);
+
+ vcycle(coarse_grid_solver);
+ pcout << "coarse iterations: " << coarse_solver_control.last_step() << std::endl;
+ }
+
+ {
+ pcout << "CG(AMG):" << std::endl;
+
+ LA::MPI::PreconditionAMG prec;
+ prec.initialize(coarse_matrix);
+
+      MGCoarseGridIterativeSolver<vector_t,
+                                  SolverCG<vector_t>,
+                                  matrix_t,
+                                  LA::MPI::PreconditionAMG> coarse_grid_solver(coarse_solver,
+                                                                               coarse_matrix,
+                                                                               prec);
+
+ vcycle(coarse_grid_solver);
+ pcout << "coarse iterations: " << coarse_solver_control.last_step() << std::endl;
+ }
+
+ {
+ pcout << "AMG:" << std::endl;
+ LA::MPI::PreconditionAMG::AdditionalData Amg_data;
+ Amg_data.elliptic = true;
+ Amg_data.higher_order_elements = true;
+ Amg_data.n_cycles = 1;
+ Amg_data.smoother_sweeps = 1;
+      MGCoarseAMG<vector_t> coarse_grid_solver(coarse_matrix, Amg_data);
+
+ vcycle(coarse_grid_solver);
+ pcout << "applications: " << coarse_grid_solver.count << std::endl;
+ }
+
+ }
+
+  template <int dim>
+  void LaplaceProblem<dim>::refine_grid ()
+ {
+    Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+
+ LA::MPI::Vector temp_solution;
+ temp_solution.reinit(locally_relevant_set, MPI_COMM_WORLD);
+ temp_solution = solution;
+
+    KellyErrorEstimator<dim>::estimate (static_cast<DoFHandler<dim>&>(mg_dof_handler),
+                                        QGauss<dim-1>(degree+1),
+                                        typename FunctionMap<dim>::type(),
+ temp_solution,
+ estimated_error_per_cell);
+
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_fraction (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.0);
+
+ triangulation.execute_coarsening_and_refinement ();
+ }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::output_results (const unsigned int cycle) const
+ {
+ }
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::run ()
+ {
+ for (unsigned int cycle=0; cycle<3; ++cycle)
+ {
+ pcout << "Cycle " << cycle << ':' << std::endl;
+
+ {
+ unsigned int n_subdiv = 6+5*cycle;
+ triangulation.clear();
+ GridGenerator::subdivided_hyper_cube (triangulation, n_subdiv, 0, 1);
+ triangulation.refine_global(1);
+ }
+
+ pcout << " Number of active cells: "
+ << triangulation.n_global_active_cells()
+ << std::endl;
+
+ setup_system ();
+
+ pcout << " Number of degrees of freedom: "
+ << mg_dof_handler.n_dofs()
+ << " (by level: ";
+        for (unsigned int level=0; level<triangulation.n_global_levels(); ++level)
+          pcout << mg_dof_handler.n_dofs(level)
+                << (level == triangulation.n_global_levels()-1 ? ")" : ", ");
+        pcout << std::endl;
+
+        assemble_system ();
+        assemble_multigrid ();
+
+        solve ();
+        output_results (cycle);
+      }
+  }
+}
+
+
+int main (int argc, char *argv[])
+{
+  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  try
+    {
+      using namespace dealii;
+      using namespace Step50;
+
+      LaplaceProblem<2> laplace_problem(3/*degree*/);
+      laplace_problem.run ();
+    }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ throw;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ throw;
+ }
+
+ return 0;
+}
diff --git a/tests/multigrid/mg_coarse_01.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=1.output b/tests/multigrid/mg_coarse_01.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=1.output
new file mode 100644
index 0000000000..62a9fb8fb0
--- /dev/null
+++ b/tests/multigrid/mg_coarse_01.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=1.output
@@ -0,0 +1,69 @@
+Cycle 0:
+ Number of active cells: 144
+ Number of degrees of freedom: 1369 (by level: 361, 1369)
+CG(ID):
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+coarse iterations: 31
+CG(ILU):
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+coarse iterations: 19
+CG(AMG):
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+coarse iterations: 1
+AMG:
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+applications: 3
+Cycle 1:
+ Number of active cells: 484
+ Number of degrees of freedom: 4489 (by level: 1156, 4489)
+CG(ID):
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+coarse iterations: 56
+CG(ILU):
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+coarse iterations: 30
+CG(AMG):
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+coarse iterations: 1
+AMG:
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+applications: 3
+Cycle 2:
+ Number of active cells: 1024
+ Number of degrees of freedom: 9409 (by level: 2401, 9409)
+CG(ID):
+check residual:0.00872685
+check residual:0.00725329
+check residual:0.00652099
+coarse iterations: 80
+CG(ILU):
+check residual:0.00872685
+check residual:0.00725329
+check residual:0.00652099
+coarse iterations: 38
+CG(AMG):
+check residual:0.00872685
+check residual:0.00725329
+check residual:0.00652099
+coarse iterations: 22
+AMG:
+check residual:0.0114115
+check residual:0.0128114
+check residual:0.0135806
+applications: 3
diff --git a/tests/multigrid/mg_coarse_01.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=3.output b/tests/multigrid/mg_coarse_01.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=3.output
new file mode 100644
index 0000000000..6f1356aa46
--- /dev/null
+++ b/tests/multigrid/mg_coarse_01.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=3.output
@@ -0,0 +1,69 @@
+Cycle 0:
+ Number of active cells: 144
+ Number of degrees of freedom: 1369 (by level: 361, 1369)
+CG(ID):
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+coarse iterations: 31
+CG(ILU):
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+coarse iterations: 30
+CG(AMG):
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+coarse iterations: 1
+AMG:
+check residual:0.0231622
+check residual:0.0192417
+check residual:0.0172953
+applications: 3
+Cycle 1:
+ Number of active cells: 484
+ Number of degrees of freedom: 4489 (by level: 1156, 4489)
+CG(ID):
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+coarse iterations: 56
+CG(ILU):
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+coarse iterations: 50
+CG(AMG):
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+coarse iterations: 1
+AMG:
+check residual:0.012677
+check residual:0.0105349
+check residual:0.00947063
+applications: 3
+Cycle 2:
+ Number of active cells: 1024
+ Number of degrees of freedom: 9409 (by level: 2401, 9409)
+CG(ID):
+check residual:0.00872685
+check residual:0.00725329
+check residual:0.00652099
+coarse iterations: 80
+CG(ILU):
+check residual:0.00872685
+check residual:0.00725329
+check residual:0.00652099
+coarse iterations: 67
+CG(AMG):
+check residual:0.00872685
+check residual:0.00725329
+check residual:0.00652099
+coarse iterations: 20
+AMG:
+check residual:0.0115343
+check residual:0.0130869
+check residual:0.0139613
+applications: 3