ENDIF()
ENDFOREACH()
+ IF((TRILINOS_VERSION_MAJOR EQUAL 11 AND
+ NOT (TRILINOS_VERSION_MINOR LESS 14))
+ OR
+ (NOT (TRILINOS_VERSION_MAJOR LESS 12)))
+ ITEM_MATCHES(_module_found MueLu ${Trilinos_PACKAGE_LIST})
+ IF(_module_found)
+ MESSAGE(STATUS "Found MueLu")
+ ELSE()
+ MESSAGE(STATUS "Module MueLu not found!")
+ SET(_modules_missing "${_modules_missing} MueLu")
+ SET(${var} FALSE)
+ ENDIF()
+ ENDIF()
+
IF(NOT ${var})
MESSAGE(STATUS "Could not find a sufficient Trilinos installation: "
"Missing ${_modules_missing}"
<h3>Specific improvements</h3>
<ol>
+ <li> New: Add MueLu preconditioner from Trilinos. This is a new algebraic
+ multigrid package. The input parameters are almost the same as the ones
+ from ML so that the two preconditioners can be easily swapped.
+ <br>
+ (Bruno Turcksin, 2015/03/11)
+ </li>
+
<li> Fixed: Iterating over the elements of a TrilinosWrappers::SparseMatrix
object previously led to errors if the matrix was in fact stored in
parallel across multiple MPI processes. This is now fixed: rows not
// ---------------------------------------------------------------------
//
-// Copyright (C) 2008 - 2014 by the deal.II authors
+// Copyright (C) 2008 - 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
# include <Epetra_RowMatrix.h>
# include <Epetra_Vector.h>
+# if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+# include <MueLu.hpp>
+# include <Teuchos_RCP.hpp>
+# endif
+
// forward declarations
class Ifpack_Preconditioner;
class Ifpack_Chebyshev;
+#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+ /**
+   * This class implements an algebraic multigrid (AMG) preconditioner based
+   * on the Trilinos MueLu implementation, which is a black-box preconditioner
+   * that works well for many PDE-based linear problems. The interface of
+   * PreconditionAMGMueLu is the same as the interface of PreconditionAMG,
+   * except for the higher_order_elements parameter, which does not exist in
+   * PreconditionAMGMueLu.
+ *
+ * This class requires Trilinos 11.14 or higher.
+ *
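+   * A minimal usage sketch (the names <tt>system_matrix</tt>,
+   * <tt>solution</tt>, <tt>system_rhs</tt> and <tt>solver</tt> are
+   * placeholders for an already assembled TrilinosWrappers::SparseMatrix,
+   * the corresponding vectors, and a Krylov solver such as SolverCG, as in
+   * the tests accompanying this class):
+   * @code
+   *   TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+   *   TrilinosWrappers::PreconditionAMGMueLu::AdditionalData data;
+   *   data.smoother_sweeps = 2;
+   *   preconditioner.initialize (system_matrix, data);
+   *
+   *   // use the AMG hierarchy as a preconditioner in the outer solver
+   *   solver.solve (system_matrix, solution, system_rhs, preconditioner);
+   * @endcode
+   *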
+   * This interface should not be considered stable.
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Preconditioners
+ * @author Bruno Turcksin, 2014
+ */
+ class PreconditionAMGMueLu : public PreconditionBase
+ {
+ public:
+
+
+ /**
+ * A data structure that is used to control details of how the algebraic
+ * multigrid is set up. The flags detailed in here are then passed to
+     * the Trilinos MueLu implementation. A structure of the current type is
+     * passed to the initialize() function of PreconditionAMGMueLu.
+ */
+ struct AdditionalData
+ {
+ /**
+ * Constructor. By default, we pretend to work on elliptic problems with
+ * linear finite elements on a scalar equation.
+ */
+ AdditionalData (const bool elliptic = true,
+ const unsigned int n_cycles = 1,
+                      const bool w_cycle = false,
+ const double aggregation_threshold = 1e-4,
+ const std::vector<std::vector<bool> > &constant_modes = std::vector<std::vector<bool> > (0),
+ const unsigned int smoother_sweeps = 2,
+ const unsigned int smoother_overlap = 0,
+ const bool output_details = false,
+ const char *smoother_type = "Chebyshev",
+ const char *coarse_type = "Amesos-KLU");
+
+ /**
+ * Determines whether the AMG preconditioner should be optimized for
+ * elliptic problems (MueLu option smoothed aggregation SA, using a
+ * Chebyshev smoother) or for non-elliptic problems (MueLu option
+ * non-symmetric smoothed aggregation NSSA, smoother is SSOR with
+ * underrelaxation).
+ */
+ bool elliptic;
+
+ /**
+ * Defines how many multigrid cycles should be performed by the
+ * preconditioner.
+ */
+ unsigned int n_cycles;
+
+ /**
+ * Defines whether a w-cycle should be used instead of the standard
+ * setting of a v-cycle.
+ */
+ bool w_cycle;
+
+ /**
+ * This threshold tells the AMG setup how the coarsening should be
+ * performed. In the AMG used by MueLu, all points that strongly couple
+ * with the tentative coarse-level point form one aggregate. The term
+ * <em>strong coupling</em> is controlled by the variable
+ * <tt>aggregation_threshold</tt>, meaning that all elements that are
+ * not smaller than <tt>aggregation_threshold</tt> times the diagonal
+ * element do couple strongly.
+ */
+ double aggregation_threshold;
+
+ /**
+ * Specifies the constant modes (near null space) of the matrix. This
+ * parameter tells AMG whether we work on a scalar equation (where the
+ * near null space only consists of ones) or on a vector-valued
+ * equation.
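+       *
+       * A common way to fill this field is the following sketch, assuming a
+       * DoFHandler object <tt>dof_handler</tt> for a scalar problem (this is
+       * what the tests accompanying this class do):
+       * @code
+       *   AdditionalData data;
+       *   DoFTools::extract_constant_modes (dof_handler,
+       *                                     std::vector<bool>(1, true),
+       *                                     data.constant_modes);
+       * @endcode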
+ */
+ std::vector<std::vector<bool> > constant_modes;
+
+ /**
+ * Determines how many sweeps of the smoother should be performed. When
+ * the flag <tt>elliptic</tt> is set to <tt>true</tt>, i.e., for
+ * elliptic or almost elliptic problems, the polynomial degree of the
+ * Chebyshev smoother is set to <tt>smoother_sweeps</tt>. The term
+ * sweeps refers to the number of matrix-vector products performed in
+ * the Chebyshev case. In the non-elliptic case,
+ * <tt>smoother_sweeps</tt> sets the number of SSOR relaxation sweeps
+ * for post-smoothing to be performed.
+ */
+ unsigned int smoother_sweeps;
+
+ /**
+ * Determines the overlap in the SSOR/Chebyshev error smoother when run
+ * in parallel.
+ */
+ unsigned int smoother_overlap;
+
+ /**
+ * If this flag is set to <tt>true</tt>, then internal information from
+       * the MueLu preconditioner is printed to screen. This can be useful when
+ * debugging the preconditioner.
+ */
+ bool output_details;
+
+ /**
+ * Determines which smoother to use for the AMG cycle. Possibilities
+ * for smoother_type are the following:
+ * <ul>
+ * <li> "Aztec" </li>
+ * <li> "IFPACK" </li>
+ * <li> "Jacobi" </li>
+ * <li> "ML symmetric Gauss-Seidel" </li>
+ * <li> "symmetric Gauss-Seidel" </li>
+ * <li> "ML Gauss-Seidel" </li>
+ * <li> "Gauss-Seidel" </li>
+ * <li> "block Gauss-Seidel" </li>
+ * <li> "symmetric block Gauss-Seidel" </li>
+ * <li> "Chebyshev" </li>
+ * <li> "MLS" </li>
+ * <li> "Hiptmair" </li>
+ * <li> "Amesos-KLU" </li>
+ * <li> "Amesos-Superlu" </li>
+ * <li> "Amesos-UMFPACK" </li>
+ * <li> "Amesos-Superludist" </li>
+ * <li> "Amesos-MUMPS" </li>
+ * <li> "user-defined" </li>
+ * <li> "SuperLU" </li>
+ * <li> "IFPACK-Chebyshev" </li>
+ * <li> "self" </li>
+ * <li> "do-nothing" </li>
+ * <li> "IC" </li>
+ * <li> "ICT" </li>
+ * <li> "ILU" </li>
+ * <li> "ILUT" </li>
+ * <li> "Block Chebyshev" </li>
+ * <li> "IFPACK-Block Chebyshev" </li>
+ * </ul>
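+       *
+       * For example (a small sketch only; which smoother works best depends
+       * on the problem at hand), a Gauss-Seidel smoother together with a
+       * direct coarse solver could be selected via
+       * @code
+       *   AdditionalData data;
+       *   data.smoother_type = "symmetric Gauss-Seidel";
+       *   data.coarse_type   = "Amesos-KLU";
+       * @endcode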
+ */
+ const char *smoother_type;
+
+ /**
+ * Determines which solver to use on the coarsest level. The same
+ * settings as for the smoother type are possible.
+ */
+ const char *coarse_type;
+ };
+
+ /**
+ * Destructor.
+ */
+ ~PreconditionAMGMueLu();
+
+ /**
+ * Let Trilinos compute a multilevel hierarchy for the solution of a
+ * linear system with the given matrix. The function uses the matrix
+ * format specified in TrilinosWrappers::SparseMatrix.
+ */
+ void initialize (const SparseMatrix &matrix,
+ const AdditionalData &additional_data = AdditionalData());
+
+ /**
+ * Let Trilinos compute a multilevel hierarchy for the solution of a
+ * linear system with the given matrix. As opposed to the other initialize
+     * function above, this function uses an object of type Epetra_CrsMatrix.
+ */
+ void initialize (const Epetra_CrsMatrix &matrix,
+ const AdditionalData &additional_data = AdditionalData());
+
+ /**
+ * Let Trilinos compute a multilevel hierarchy for the solution of a
+ * linear system with the given matrix. The function uses the matrix
+ * format specified in TrilinosWrappers::SparseMatrix.
+ *
+ * This function is similar to the one above, but allows the user to set
+ * most of the options of the Trilinos ML preconditioner. In order to find out
+ * about all the options for ML, we refer to the <a
+ * href=http://trilinos.sandia.gov/packages/ml/mlguide5.pdf>ML user's
+ * guide</a>. Not all ML options have a corresponding MueLu option.
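+     *
+     * A rough sketch, using only parameters that the AdditionalData-based
+     * initialize() function also sets internally, and assuming an already
+     * assembled TrilinosWrappers::SparseMatrix <tt>system_matrix</tt>:
+     * @code
+     *   Teuchos::ParameterList parameters;
+     *   parameters.set ("smoother: type", "Chebyshev");
+     *   parameters.set ("smoother: sweeps", 4);
+     *   parameters.set ("cycle applications", 2);
+     *
+     *   TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+     *   preconditioner.initialize (system_matrix, parameters);
+     * @endcode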
+ */
+ void initialize (const SparseMatrix &matrix,
+ Teuchos::ParameterList &muelu_parameters);
+
+ /**
+ * Let Trilinos compute a multilevel hierarchy for the solution of a
+ * linear system with the given matrix. As opposed to the other initialize
+ * function above, this function uses an object of type Epetra_CrsMatrix.
+ */
+ void initialize (const Epetra_CrsMatrix &matrix,
+ Teuchos::ParameterList &muelu_parameters);
+
+ /**
+ * Let Trilinos compute a multilevel hierarchy for the solution of a
+     * linear system with the given matrix. This function takes a deal.II
+ * matrix and copies the content into a Trilinos matrix, so the function
+ * can be considered rather inefficient.
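+     *
+     * A brief sketch (assuming <tt>matrix</tt> is an already assembled
+     * deal.II sparse matrix with <tt>double</tt> entries):
+     * @code
+     *   TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+     *   preconditioner.initialize (matrix);
+     * @endcode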
+ */
+ template <typename number>
+ void initialize (const ::dealii::SparseMatrix<number> &deal_ii_sparse_matrix,
+ const AdditionalData &additional_data = AdditionalData(),
+ const double drop_tolerance = 1e-13,
+ const ::dealii::SparsityPattern *use_this_sparsity = 0);
+
+ /**
+     * Destroys the preconditioner, leaving the object in the state it had
+     * right after calling the constructor.
+ */
+ void clear ();
+
+ /**
+     * Returns an estimate of the memory consumption of this class.
+ */
+ size_type memory_consumption () const;
+
+ private:
+ /**
+ * A copy of the deal.II matrix into Trilinos format.
+ */
+ std_cxx11::shared_ptr<SparseMatrix> trilinos_matrix;
+ };
+#endif
+
+
+
/**
* A wrapper class for an identity preconditioner for Trilinos matrices.
*
# include <ml_include.h>
# include <ml_MultiLevelPreconditioner.h>
+#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+#include <MueLu_EpetraOperator.hpp>
+#include <MueLu_MLParameterListInterpreter.hpp>
+#endif
DEAL_II_NAMESPACE_OPEN
+#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+ /* -------------------------- PreconditionAMGMueLu --------------------- */
- /* -------------------------- PreconditionAMG -------------------------- */
+ PreconditionAMGMueLu::AdditionalData::
+ AdditionalData (const bool elliptic,
+ const unsigned int n_cycles,
+ const bool w_cycle,
+ const double aggregation_threshold,
+ const std::vector<std::vector<bool> > &constant_modes,
+ const unsigned int smoother_sweeps,
+ const unsigned int smoother_overlap,
+ const bool output_details,
+ const char *smoother_type,
+ const char *coarse_type)
+ :
+ elliptic (elliptic),
+ n_cycles (n_cycles),
+ w_cycle (w_cycle),
+ aggregation_threshold (aggregation_threshold),
+ constant_modes (constant_modes),
+ smoother_sweeps (smoother_sweeps),
+ smoother_overlap (smoother_overlap),
+ output_details (output_details),
+ smoother_type (smoother_type),
+ coarse_type (coarse_type)
+ {}
+
+
+ PreconditionAMGMueLu::~PreconditionAMGMueLu()
+ {
+ preconditioner.reset();
+ trilinos_matrix.reset();
+ }
+
+
+
+ void
+ PreconditionAMGMueLu::initialize (const SparseMatrix &matrix,
+ const AdditionalData &additional_data)
+ {
+ initialize(matrix.trilinos_matrix(), additional_data);
+ }
+
+
+
+ void
+ PreconditionAMGMueLu::initialize (const Epetra_CrsMatrix &matrix,
+ const AdditionalData &additional_data)
+ {
+ // Build the AMG preconditioner.
+ Teuchos::ParameterList parameter_list;
+
+ if (additional_data.elliptic == true)
+ ML_Epetra::SetDefaults("SA",parameter_list);
+ else
+ {
+ ML_Epetra::SetDefaults("NSSA",parameter_list);
+ parameter_list.set("aggregation: block scaling", true);
+ }
+    // MIS does not exist anymore; the only choices are uncoupled and coupled.
+    // When using uncoupled, aggregates cannot span multiple processes; when
+    // using coupled, aggregates can span multiple processes.
+ parameter_list.set("aggregation: type", "Uncoupled");
+
+ parameter_list.set("smoother: type", additional_data.smoother_type);
+ parameter_list.set("coarse: type", additional_data.coarse_type);
+
+ parameter_list.set("smoother: sweeps",
+ static_cast<int>(additional_data.smoother_sweeps));
+ parameter_list.set("cycle applications",
+ static_cast<int>(additional_data.n_cycles));
+ if (additional_data.w_cycle == true)
+ parameter_list.set("prec type", "MGW");
+ else
+ parameter_list.set("prec type", "MGV");
+
+ parameter_list.set("smoother: Chebyshev alpha",10.);
+ parameter_list.set("smoother: ifpack overlap",
+ static_cast<int>(additional_data.smoother_overlap));
+ parameter_list.set("aggregation: threshold",
+ additional_data.aggregation_threshold);
+ parameter_list.set("coarse: max size", 2000);
+
+ if (additional_data.output_details)
+ parameter_list.set("ML output", 10);
+ else
+ parameter_list.set("ML output", 0);
+
+ const Epetra_Map &domain_map = matrix.OperatorDomainMap();
+
+ const size_type constant_modes_dimension =
+ additional_data.constant_modes.size();
+ Epetra_MultiVector distributed_constant_modes (domain_map,
+ constant_modes_dimension > 0 ?
+ constant_modes_dimension : 1);
+ std::vector<double> dummy (constant_modes_dimension);
+
+ if (constant_modes_dimension > 0)
+ {
+ const size_type n_rows = n_global_rows(matrix);
+ const bool constant_modes_are_global =
+ additional_data.constant_modes[0].size() == n_rows;
+ const size_type n_relevant_rows =
+ constant_modes_are_global ? n_rows : additional_data.constant_modes[0].size();
+ const size_type my_size = domain_map.NumMyElements();
+ if (constant_modes_are_global == false)
+ Assert (n_relevant_rows == my_size,
+ ExcDimensionMismatch(n_relevant_rows, my_size));
+ Assert (n_rows ==
+ static_cast<size_type>(global_length(distributed_constant_modes)),
+ ExcDimensionMismatch(n_rows,
+ global_length(distributed_constant_modes)));
+
+ // Reshape null space as a contiguous vector of doubles so that
+ // Trilinos can read from it.
+ for (size_type d=0; d<constant_modes_dimension; ++d)
+ for (size_type row=0; row<my_size; ++row)
+ {
+ TrilinosWrappers::types::int_type global_row_id =
+ constant_modes_are_global ? gid(domain_map,row) : row;
+ distributed_constant_modes[d][row] =
+ additional_data.constant_modes[d][global_row_id];
+ }
+
+ parameter_list.set("null space: type", "pre-computed");
+ parameter_list.set("null space: dimension",
+ distributed_constant_modes.NumVectors());
+ if (my_size > 0)
+ parameter_list.set("null space: vectors",
+ distributed_constant_modes.Values());
+ // We need to set a valid pointer to data even if there is no data on
+        // the current processor. Therefore, pass a dummy in that case.
+ else
+ parameter_list.set("null space: vectors",
+ &dummy[0]);
+ }
+
+ initialize (matrix, parameter_list);
+ }
+
+
+
+ void
+ PreconditionAMGMueLu::initialize (const SparseMatrix &matrix,
+ Teuchos::ParameterList &muelu_parameters)
+ {
+ initialize(matrix.trilinos_matrix(), muelu_parameters);
+ }
+
+
+
+ void
+ PreconditionAMGMueLu::initialize (const Epetra_CrsMatrix &matrix,
+ Teuchos::ParameterList &muelu_parameters)
+ {
+    // We cannot use MueLu::CreateEpetraOperator directly because we cannot
+    // transfer ownership of MueLu::EpetraOperator from Teuchos::RCP to
+    // std::shared_ptr.
+
+    // For now, just use the serial node, i.e., no multithreading or GPU.
+ typedef KokkosClassic::DefaultNode::DefaultNodeType node;
+ preconditioner.reset ();
+
+    // Wrap the matrix into an Xpetra::Matrix, which is the format MueLu works
+    // on. The constness needs to be cast away. MueLu uses Teuchos::RCP, which
+    // is Trilinos' version of std::shared_ptr.
+ Teuchos::RCP<Epetra_CrsMatrix> rcp_matrix = Teuchos::rcpFromRef(
+ *(const_cast<Epetra_CrsMatrix *>(&matrix)));
+ Teuchos::RCP<Xpetra::CrsMatrix<double,int,int,node> > muelu_crs_matrix =
+ Teuchos::rcp(new Xpetra::EpetraCrsMatrix (rcp_matrix));
+ Teuchos::RCP<Xpetra::Matrix<double,int,int,node> > muelu_matrix =
+ Teuchos::rcp(new Xpetra::CrsMatrixWrap<double,int,int,node> (muelu_crs_matrix));
+
+ // Create the multigrid hierarchy using ML parameters.
+ Teuchos::RCP<MueLu::HierarchyManager<double,int,int,node> > hierarchy_factory;
+ hierarchy_factory = Teuchos::rcp(
+ new MueLu::MLParameterListInterpreter<double,int,int,node> (muelu_parameters));
+ Teuchos::RCP<MueLu::Hierarchy<double,int,int,node> > hierarchy = hierarchy_factory->CreateHierarchy();
+ hierarchy->GetLevel(0)->Set("A",muelu_matrix);
+ hierarchy_factory->SetupHierarchy(*hierarchy);
+
+ // MueLu::EpetraOperator is just a wrapper around a "standard"
+ // Epetra_Operator.
+ preconditioner.reset(new MueLu::EpetraOperator(hierarchy));
+ }
+
+
+
+ template <typename number>
+ void
+ PreconditionAMGMueLu::
+ initialize (const ::dealii::SparseMatrix<number> &deal_ii_sparse_matrix,
+ const AdditionalData &additional_data,
+ const double drop_tolerance,
+ const ::dealii::SparsityPattern *use_this_sparsity)
+ {
+ preconditioner.reset();
+ const size_type n_rows = deal_ii_sparse_matrix.m();
+
+    // Initialize the Epetra matrix using an equidistributed map; avoid
+    // storing the zero elements.
+ vector_distributor.reset (new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n_rows),
+ 0, communicator));
+
+ if (trilinos_matrix.get() == 0)
+ trilinos_matrix.reset (new SparseMatrix());
+
+ trilinos_matrix->reinit (*vector_distributor, *vector_distributor,
+ deal_ii_sparse_matrix, drop_tolerance, true,
+ use_this_sparsity);
+
+ initialize (*trilinos_matrix, additional_data);
+ }
+
+
+
+ void PreconditionAMGMueLu::clear ()
+ {
+ PreconditionBase::clear();
+ trilinos_matrix.reset();
+ }
+
+
+
+ PreconditionAMGMueLu::size_type
+ PreconditionAMGMueLu::memory_consumption() const
+ {
+    unsigned int memory = sizeof(*this);
+
+    // todo: find a way to read out MueLu's data sizes
+ if (trilinos_matrix.get() != 0)
+ memory += trilinos_matrix->memory_consumption();
+ return memory;
+ }
+
+
+
+
+ // explicit instantiations
+ template void PreconditionAMGMueLu::initialize (const ::dealii::SparseMatrix<double> &,
+ const AdditionalData &, const double,
+ const ::dealii::SparsityPattern *);
+ template void PreconditionAMGMueLu::initialize (const ::dealii::SparseMatrix<float> &,
+ const AdditionalData &, const double,
+ const ::dealii::SparsityPattern *);
+#endif
+
+
+
+ /* -------------------------- PreconditionIdentity --------------------- */
void
PreconditionIdentity::vmult(VectorBase &dst,
--- /dev/null
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2008 - 2015 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+//
+// Check MueLu in parallel. This test is the same as periodicity_02 except that
+// the preconditioner is MueLu.
+//
+
+#define PERIODIC
+
+#include "../tests.h"
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <deal.II/lac/trilinos_solver.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_block_sparse_matrix.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_boundary_lib.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+
+namespace Step22
+{
+ using namespace dealii;
+
+ template <int dim>
+ class StokesProblem
+ {
+ public:
+ StokesProblem (const unsigned int degree);
+ void run ();
+
+ private:
+ void setup_dofs ();
+ void assemble_system ();
+ void solve ();
+ void get_point_value (const Point<dim> point, const int proc,
+ Vector<double> &value) const;
+ void check_periodicity(const unsigned int cycle) const;
+ void output_results (const unsigned int refinement_cycle) const;
+ void refine_mesh ();
+
+ const unsigned int degree;
+
+ MPI_Comm mpi_communicator;
+
+ HyperShellBoundary<dim> boundary;
+ parallel::distributed::Triangulation<dim> triangulation;
+ FESystem<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ ConstraintMatrix constraints;
+ std::vector<IndexSet> owned_partitioning;
+ std::vector<IndexSet> relevant_partitioning;
+
+ TrilinosWrappers::BlockSparsityPattern sparsity_pattern;
+ TrilinosWrappers::BlockSparseMatrix system_matrix;
+
+ TrilinosWrappers::MPI::BlockVector solution;
+ TrilinosWrappers::MPI::BlockVector system_rhs;
+
+ ConditionalOStream pcout;
+ };
+
+
+
+ template <int dim>
+ class BoundaryValues : public Function<dim>
+ {
+ public:
+ BoundaryValues () : Function<dim>(dim+1) {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+
+ virtual void vector_value (const Point<dim> &p,
+ Vector<double> &value) const;
+ };
+
+
+ template <int dim>
+ double
+ BoundaryValues<dim>::value (const Point<dim> &p,
+ const unsigned int component) const
+ {
+ Assert (component < this->n_components,
+ ExcIndexRange (component, 0, this->n_components));
+
+// if (component == 0)
+// return (p[1]-.5)*(1.-p[1]);
+// return 0;
+ return (1-2*(component==0))*p[(component+1)%2]/(p[0]*p[0]+p[1]*p[1]);
+ }
+
+
+ template <int dim>
+ void
+ BoundaryValues<dim>::vector_value (const Point<dim> &p,
+ Vector<double> &values) const
+ {
+ for (unsigned int c=0; c<this->n_components; ++c)
+ values(c) = BoundaryValues<dim>::value (p, c);
+ }
+
+
+
+ template <int dim>
+ class RightHandSide : public Function<dim>
+ {
+ public:
+ RightHandSide () : Function<dim>(dim+1) {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+
+ virtual void vector_value (const Point<dim> &p,
+ Vector<double> &value) const;
+
+ };
+
+
+ template <int dim>
+ double
+ RightHandSide<dim>::value (const Point<dim> &/*p*/,
+ const unsigned int /*component*/) const
+ {
+ return 0;
+ }
+
+
+ template <int dim>
+ void
+ RightHandSide<dim>::vector_value (const Point<dim> &p,
+ Vector<double> &values) const
+ {
+ for (unsigned int c=0; c<this->n_components; ++c)
+ values(c) = RightHandSide<dim>::value (p, c);
+ }
+
+
+
+
+
+ template <class Matrix, class Preconditioner>
+ class InverseMatrix : public Preconditioner
+ {
+ public:
+ InverseMatrix (const Matrix &m,
+ const Preconditioner &preconditioner,
+ const IndexSet &locally_owned,
+ const MPI_Comm &mpi_communicator);
+
+ void vmult (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const;
+
+ private:
+ const SmartPointer<const Matrix> matrix;
+ const SmartPointer<const Preconditioner> preconditioner;
+
+ const MPI_Comm *mpi_communicator;
+ mutable TrilinosWrappers::MPI::Vector tmp;
+ };
+
+
+ template <class Matrix, class Preconditioner>
+ InverseMatrix<Matrix,Preconditioner>::InverseMatrix
+ (const Matrix &m,
+ const Preconditioner &preconditioner,
+ const IndexSet &locally_owned,
+ const MPI_Comm &mpi_communicator)
+ :
+ matrix (&m),
+ preconditioner (&preconditioner),
+ mpi_communicator (&mpi_communicator),
+ tmp(locally_owned, mpi_communicator)
+ {}
+
+
+
+ template <class Matrix, class Preconditioner>
+ void InverseMatrix<Matrix,Preconditioner>::vmult
+ (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const
+ {
+ SolverControl solver_control (src.size(), 1e-6*src.l2_norm(), false, false);
+ TrilinosWrappers::SolverCG cg (solver_control,
+ TrilinosWrappers::SolverCG::AdditionalData());
+
+ tmp = 0.;
+ cg.solve (*matrix, tmp, src, *preconditioner);
+ dst = tmp;
+ }
+
+
+
+ template <class Preconditioner>
+ class SchurComplement : public TrilinosWrappers::SparseMatrix
+ {
+ public:
+ SchurComplement ( const TrilinosWrappers::BlockSparseMatrix &system_matrix,
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ Preconditioner> &A_inverse,
+ const IndexSet &owned_pres,
+ const IndexSet &relevant_pres,
+ const MPI_Comm &mpi_communicator);
+
+ void vmult (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const;
+
+ private:
+ const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> system_matrix;
+ const SmartPointer<const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ Preconditioner> > A_inverse;
+ mutable TrilinosWrappers::MPI::Vector tmp1, tmp2;
+ };
+
+
+
+ template <class Preconditioner>
+ SchurComplement<Preconditioner>::
+ SchurComplement (const TrilinosWrappers::BlockSparseMatrix &system_matrix,
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ Preconditioner> &A_inverse,
+ const IndexSet &owned_vel,
+ const IndexSet &relevant_vel,
+ const MPI_Comm &mpi_communicator)
+ :
+ system_matrix (&system_matrix),
+ A_inverse (&A_inverse),
+ tmp1 (owned_vel, mpi_communicator),
+ tmp2 (tmp1)
+ {}
+
+
+ template <class Preconditioner>
+ void SchurComplement<Preconditioner>::vmult
+ (TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const
+ {
+ system_matrix->block(0,1).vmult (tmp1, src);
+ A_inverse->vmult (tmp2, tmp1);
+ system_matrix->block(1,0).vmult (dst, tmp2);
+ }
+
+
+
+
+ template <int dim>
+ StokesProblem<dim>::StokesProblem (const unsigned int degree)
+ :
+ degree (degree),
+ mpi_communicator (MPI_COMM_WORLD),
+ triangulation (mpi_communicator/*,
+ Triangulation<dim>::maximum_smoothing*/),
+ fe (FE_Q<dim>(degree+1), dim,
+ FE_Q<dim>(degree) , 1),
+ dof_handler (triangulation),
+ pcout (Utilities::MPI::this_mpi_process(mpi_communicator)
+ == 0
+ ?
+ deallog.get_file_stream()
+ :
+ std::cout,
+ (Utilities::MPI::this_mpi_process(mpi_communicator)
+ == 0))
+ {}
+
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::setup_dofs ()
+ {
+ dof_handler.distribute_dofs (fe);
+
+ std::vector<unsigned int> block_component (dim+1,0);
+ block_component[dim] = 1;
+ DoFRenumbering::component_wise (dof_handler, block_component);
+
+ std::vector<types::global_dof_index> dofs_per_block (2);
+ DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, block_component);
+ const unsigned int n_u = dofs_per_block[0],
+ n_p = dofs_per_block[1];
+
+ {
+ owned_partitioning.clear();
+ IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+ owned_partitioning.push_back(locally_owned_dofs.get_view(0, n_u));
+ owned_partitioning.push_back(locally_owned_dofs.get_view(n_u, n_u+n_p));
+
+ relevant_partitioning.clear();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);
+ relevant_partitioning.push_back(locally_relevant_dofs.get_view(0, n_u));
+ relevant_partitioning.push_back(locally_relevant_dofs.get_view(n_u, n_u+n_p));
+ }
+
+ {
+ constraints.clear ();
+
+ FEValuesExtractors::Vector velocities(0);
+ FEValuesExtractors::Scalar pressure(dim);
+
+ DoFTools::make_hanging_node_constraints (dof_handler,
+ constraints);
+#ifdef PERIODIC
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 3,
+ BoundaryValues<dim>(),
+ constraints,
+ fe.component_mask(velocities));
+#endif
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ BoundaryValues<dim>(),//ZeroFunction<dim>(dim+1),
+ constraints,
+ fe.component_mask(velocities));
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 1,
+ BoundaryValues<dim>(),//ZeroFunction<dim>(dim+1),
+ constraints,
+ fe.component_mask(velocities));
+
+#ifdef PERIODIC
+ std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator> >
+ periodicity_vector;
+
+ FullMatrix<double> matrix(dim);
+ matrix[0][1]=1.;
+ matrix[1][0]=-1.;
+
+ std::vector<unsigned int> first_vector_components;
+ first_vector_components.push_back(0);
+
+ GridTools::collect_periodic_faces
+ (dof_handler, 2, 3, 1, periodicity_vector, Tensor<1,dim>(),
+ matrix, first_vector_components);
+
+ DoFTools::make_periodicity_constraints<DoFHandler<dim> >
+ (periodicity_vector, constraints, fe.component_mask(velocities));
+#endif
+ }
+
+ constraints.close ();
+
+ {
+ TrilinosWrappers::BlockSparsityPattern bsp
+ (owned_partitioning, owned_partitioning,
+ relevant_partitioning, mpi_communicator);
+
+ DoFTools::make_sparsity_pattern
+ (dof_handler, bsp, constraints, false,
+ Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ bsp.compress();
+
+ system_matrix.reinit (bsp);
+ }
+
+ system_rhs.reinit (owned_partitioning,
+ mpi_communicator);
+ solution.reinit (owned_partitioning, relevant_partitioning,
+ mpi_communicator);
+ }
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::assemble_system ()
+ {
+ system_matrix=0.;
+ system_rhs=0.;
+
+ QGauss<dim> quadrature_formula(degree+2);
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values |
+ update_quadrature_points |
+ update_JxW_values |
+ update_gradients);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> local_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> local_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ const RightHandSide<dim> right_hand_side;
+ std::vector<Vector<double> > rhs_values (n_q_points,
+ Vector<double>(dim+1));
+
+ const FEValuesExtractors::Vector velocities (0);
+ const FEValuesExtractors::Scalar pressure (dim);
+
+ std::vector<SymmetricTensor<2,dim> > symgrad_phi_u (dofs_per_cell);
+ std::vector<double> div_phi_u (dofs_per_cell);
+ std::vector<double> phi_p (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->is_locally_owned())
+ {
+ fe_values.reinit (cell);
+ local_matrix = 0;
+ local_rhs = 0;
+
+ right_hand_side.vector_value_list(fe_values.get_quadrature_points(),
+ rhs_values);
+
+ for (unsigned int q=0; q<n_q_points; ++q)
+ {
+ for (unsigned int k=0; k<dofs_per_cell; ++k)
+ {
+ symgrad_phi_u[k] = fe_values[velocities].symmetric_gradient (k, q);
+ div_phi_u[k] = fe_values[velocities].divergence (k, q);
+ phi_p[k] = fe_values[pressure].value (k, q);
+ }
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<=i; ++j)
+ {
+ local_matrix(i,j) += (symgrad_phi_u[i] * symgrad_phi_u[j]
+ - div_phi_u[i] * phi_p[j]
+ - phi_p[i] * div_phi_u[j]
+ + phi_p[i] * phi_p[j])
+ * fe_values.JxW(q);
+
+ }
+
+ const unsigned int component_i =
+ fe.system_to_component_index(i).first;
+ local_rhs(i) += fe_values.shape_value(i,q) *
+ rhs_values[q](component_i) *
+ fe_values.JxW(q);
+ }
+ }
+
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=i+1; j<dofs_per_cell; ++j)
+ local_matrix(i,j) = local_matrix(j,i);
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global (local_matrix, local_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+
+ system_matrix.compress (VectorOperation::add);
+ system_rhs.compress (VectorOperation::add);
+
+ pcout << " Computing preconditioner..." << std::endl << std::flush;
+ }
+
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::solve ()
+ {
+ TrilinosWrappers::PreconditionJacobi A_preconditioner;
+ A_preconditioner.initialize(system_matrix.block(0,0));
+
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ TrilinosWrappers::PreconditionJacobi>
+ A_inverse (system_matrix.block(0,0),
+ A_preconditioner,
+ owned_partitioning[0],
+ mpi_communicator);
+
+ TrilinosWrappers::MPI::BlockVector tmp (owned_partitioning,
+ mpi_communicator);
+
+ {
+ TrilinosWrappers::MPI::Vector schur_rhs (owned_partitioning[1],
+ mpi_communicator);
+ A_inverse.vmult (tmp.block(0), system_rhs.block(0));
+ system_matrix.block(1,0).vmult (schur_rhs, tmp.block(0));
+ schur_rhs -= system_rhs.block(1);
+
+ SchurComplement<TrilinosWrappers::PreconditionJacobi>
+ schur_complement (system_matrix, A_inverse,
+ owned_partitioning[0], relevant_partitioning[0],
+ mpi_communicator);
+
+ SolverControl solver_control (solution.block(1).size(),
+ 1e-6*schur_rhs.l2_norm(), false, false);
+ SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);
+
+ TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+ preconditioner.initialize (system_matrix.block(1,1));
+
+ InverseMatrix<TrilinosWrappers::SparseMatrix,
+ TrilinosWrappers::PreconditionAMGMueLu>
+ m_inverse (system_matrix.block(1,1), preconditioner,
+ owned_partitioning[1], mpi_communicator);
+
+ cg.solve (schur_complement,
+ tmp.block(1),
+ schur_rhs,
+ preconditioner);
+
+ constraints.distribute (tmp);
+ solution.block(1)=tmp.block(1);
+ }
+
+ {
+ system_matrix.block(0,1).vmult (tmp.block(0), tmp.block(1));
+ tmp.block(0) *= -1;
+ tmp.block(0) += system_rhs.block(0);
+
+ A_inverse.vmult (tmp.block(0), tmp.block(0));
+
+ constraints.distribute (tmp);
+ solution.block(0)=tmp.block(0);
+ }
+ }
+
+ template <int dim>
+ void StokesProblem<dim>::get_point_value
+ (const Point<dim> point, const int proc, Vector<double> &value) const
+ {
+ typename DoFHandler<dim>::active_cell_iterator cell
+ = GridTools::find_active_cell_around_point (dof_handler, point);
+
+ if (cell->is_locally_owned())
+ VectorTools::point_value (dof_handler, solution,
+ point, value);
+
+ std::vector<double> tmp (value.size());
+ for (unsigned int i=0; i<value.size(); ++i)
+ tmp[i]=value[i];
+
+ std::vector<double> tmp2 (value.size());
+ MPI_Reduce(&(tmp[0]), &(tmp2[0]), value.size(), MPI_DOUBLE,
+ MPI_SUM, proc, mpi_communicator);
+
+ for (unsigned int i=0; i<value.size(); ++i)
+ value[i]=tmp2[i];
+ }
+
+ template <int dim>
+ void StokesProblem<dim>::check_periodicity (const unsigned int cycle) const
+ {}
+
+ template <>
+ void StokesProblem<2>::check_periodicity (const unsigned int cycle) const
+ {
+ unsigned int n_points = 4;
+ for (unsigned int i = 0; i<cycle; i++)
+ n_points*=2;
+
+ for (unsigned int i=1; i< n_points; i++)
+ {
+ Vector<double> value1(3);
+ Vector<double> value2(3);
+
+ Point<2> point1;
+ point1(0)=0;
+ point1(1)=.5*(1.+1.*i/n_points);
+ Point<2> point2;
+ point2(0)=.5*(1.+1.*i/n_points);
+ point2(1)=0.;
+
+ get_point_value (point1, 0, value1);
+ get_point_value (point2, 0, value2);
+
+ if (Utilities::MPI::this_mpi_process(mpi_communicator)==0)
+ {
+ pcout << point1 << "\t" << value1[0] << "\t" << value1[1] << std::endl;
+ if (std::abs(value2[0]-value1[1])>1e-8)
+ {
+ std::cout<<point1<< "\t" << value1[1] << std::endl;
+ std::cout<<point2<< "\t" << value2[0] << std::endl;
+ Assert(false, ExcInternalError());
+ }
+ if (std::abs(value2[1]+value1[0])>1e-8)
+ {
+ std::cout<<point1<< "\t" << value1[0] << std::endl;
+ std::cout<<point2<< "\t" << value2[1] << std::endl;
+ Assert(false, ExcInternalError());
+ }
+ }
+ }
+ }
+
+
+ template <int dim>
+ void
+ StokesProblem<dim>::output_results (const unsigned int refinement_cycle) const
+ {
+ std::vector<std::string> solution_names (dim, "velocity");
+ solution_names.push_back ("pressure");
+
+ std::vector<DataComponentInterpretation::DataComponentInterpretation>
+ data_component_interpretation
+ (dim, DataComponentInterpretation::component_is_part_of_vector);
+ data_component_interpretation
+ .push_back (DataComponentInterpretation::component_is_scalar);
+
+ DataOut<dim> data_out;
+ data_out.attach_dof_handler (dof_handler);
+ data_out.add_data_vector (solution, solution_names,
+ DataOut<dim>::type_dof_data,
+ data_component_interpretation);
+ Vector<float> subdomain (triangulation.n_active_cells());
+ for (unsigned int i=0; i<subdomain.size(); ++i)
+ subdomain(i) = triangulation.locally_owned_subdomain();
+ data_out.add_data_vector (subdomain, "subdomain");
+ data_out.build_patches (degree+1);
+
+ std::ostringstream filename;
+ filename << "solution-"
+ << Utilities::int_to_string (refinement_cycle, 2)
+ << "."
+ << Utilities::int_to_string (triangulation.locally_owned_subdomain(),2)
+ << ".vtu";
+
+ std::ofstream output (filename.str().c_str());
+ data_out.write_vtu (output);
+
+ if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+ {
+ std::vector<std::string> filenames;
+ for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
+ filenames.push_back (std::string("solution-") +
+ Utilities::int_to_string (refinement_cycle, 2) +
+ "." +
+ Utilities::int_to_string(i, 2) +
+ ".vtu");
+ const std::string
+ pvtu_master_filename = ("solution-" +
+ Utilities::int_to_string (refinement_cycle, 2) +
+ ".pvtu");
+ std::ofstream pvtu_master (pvtu_master_filename.c_str());
+ data_out.write_pvtu_record (pvtu_master, filenames);
+ }
+ }
+
+
+
+ template <int dim>
+ void
+ StokesProblem<dim>::refine_mesh ()
+ {
+
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+
+ FEValuesExtractors::Scalar pressure(dim);
+ KellyErrorEstimator<dim>::estimate (dof_handler,
+ QGauss<dim-1>(degree+1),
+ typename FunctionMap<dim>::type(),
+ solution,
+ estimated_error_per_cell,
+ fe.component_mask(pressure));
+
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.0);
+ triangulation.execute_coarsening_and_refinement ();
+ }
+
+
+
+ template <int dim>
+ void StokesProblem<dim>::run ()
+ {
+ Point<dim> center;
+ const double inner_radius = .5;
+ const double outer_radius = 1.;
+
+ GridGenerator::quarter_hyper_shell (triangulation,
+ center,
+ inner_radius,
+ outer_radius,
+ 0,
+ true);
+
+#ifdef PERIODIC
+ std::vector<GridTools::PeriodicFacePair<typename parallel::distributed::Triangulation<dim>::cell_iterator> >
+ periodicity_vector;
+
+ FullMatrix<double> matrix(dim);
+ matrix[0][1]=1.;
+ matrix[1][0]=-1.;
+
+ std::vector<unsigned int> first_vector_components;
+ first_vector_components.push_back(0);
+
+ GridTools::collect_periodic_faces
+ (triangulation, 2, 3, 1, periodicity_vector, Tensor<1,dim>(),
+ matrix, first_vector_components);
+
+ triangulation.add_periodicity(periodicity_vector);
+#endif
+
+
+ triangulation.set_boundary(0, boundary);
+ triangulation.set_boundary(1, boundary);
+
+ triangulation.refine_global (4-dim);
+
+ for (unsigned int refinement_cycle = 0; refinement_cycle<3;
+ ++refinement_cycle)
+ {
+ pcout << "Refinement cycle " << refinement_cycle << std::endl;
+
+ if (refinement_cycle > 0)
+ refine_mesh ();
+
+ setup_dofs ();
+
+ pcout << " Assembling..." << std::endl << std::flush;
+ assemble_system ();
+
+ pcout << " Solving..." << std::endl << std::flush;
+ solve ();
+
+ check_periodicity(refinement_cycle);
+// output_results (refinement_cycle);
+
+ pcout << std::endl;
+ }
+ }
+}
+
+
+
+int main (int argc, char *argv[])
+{
+ try
+ {
+ using namespace dealii;
+ using namespace Step22;
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ deallog.depth_console (0);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD)==0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile, false);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+ {
+ StokesProblem<2> flow_problem(1);
+ flow_problem.run ();
+ }
+ }
+ else
+ {
+ StokesProblem<2> flow_problem(1);
+ flow_problem.run ();
+ }
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+Refinement cycle 0
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.750000 -1.33333 0.00000
+0.00000 0.875000 -1.14286 0.00000
+
+Refinement cycle 1
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.562500 -1.77778 0.00000
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.687500 -1.45455 0.00000
+0.00000 0.750000 -1.33333 5.49010e-13
+0.00000 0.812500 -1.23077 2.80537e-13
+0.00000 0.875000 -1.14286 0.00000
+0.00000 0.937500 -1.06667 0.00000
+
+Refinement cycle 2
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.531250 -1.88235 0.00000
+0.00000 0.562500 -1.77778 0.00000
+0.00000 0.593750 -1.68421 0.00000
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.656250 -1.52381 0.00000
+0.00000 0.687500 -1.45455 0.00000
+0.00000 0.718750 -1.39130 1.95566e-17
+0.00000 0.750000 -1.33333 5.49063e-13
+0.00000 0.781250 -1.28000 0.00000
+0.00000 0.812500 -1.23077 2.80586e-13
+0.00000 0.843750 -1.18519 1.69980e-17
+0.00000 0.875000 -1.14286 0.00000
+0.00000 0.906250 -1.10357 0.00000
+0.00000 0.937500 -1.06667 0.00000
+0.00000 0.968750 -1.03214 0.00000
+
--- /dev/null
+Refinement cycle 0
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.750000 -1.33333 0.00000
+0.00000 0.875000 -1.14286 0.00000
+
+Refinement cycle 1
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.562500 -1.77778 0.00000
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.687500 -1.45455 0.00000
+0.00000 0.750000 -1.33333 5.49010e-13
+0.00000 0.812500 -1.23077 2.80537e-13
+0.00000 0.875000 -1.14286 0.00000
+0.00000 0.937500 -1.06667 0.00000
+
+Refinement cycle 2
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.531250 -1.88235 0.00000
+0.00000 0.562500 -1.77778 0.00000
+0.00000 0.593750 -1.68421 0.00000
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.656250 -1.52381 0.00000
+0.00000 0.687500 -1.45455 0.00000
+0.00000 0.718750 -1.39130 1.95566e-17
+0.00000 0.750000 -1.33333 5.49063e-13
+0.00000 0.781250 -1.28000 0.00000
+0.00000 0.812500 -1.23077 2.80586e-13
+0.00000 0.843750 -1.18519 1.69980e-17
+0.00000 0.875000 -1.14286 0.00000
+0.00000 0.906250 -1.10357 0.00000
+0.00000 0.937500 -1.06667 0.00000
+0.00000 0.968750 -1.03214 0.00000
+
--- /dev/null
+Refinement cycle 0
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.750000 -1.33333 0.00000
+0.00000 0.875000 -1.14286 0.00000
+
+Refinement cycle 1
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.562500 -1.77778 0.00000
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.687500 -1.45455 0.00000
+0.00000 0.750000 -1.33333 5.49010e-13
+0.00000 0.812500 -1.23077 2.80537e-13
+0.00000 0.875000 -1.14286 0.00000
+0.00000 0.937500 -1.06667 0.00000
+
+Refinement cycle 2
+ Assembling...
+ Computing preconditioner...
+ Solving...
+0.00000 0.531250 -1.88235 0.00000
+0.00000 0.562500 -1.77778 0.00000
+0.00000 0.593750 -1.68421 0.00000
+0.00000 0.625000 -1.60000 0.00000
+0.00000 0.656250 -1.52381 0.00000
+0.00000 0.687500 -1.45455 0.00000
+0.00000 0.718750 -1.39130 1.95566e-17
+0.00000 0.750000 -1.33333 5.49063e-13
+0.00000 0.781250 -1.28000 0.00000
+0.00000 0.812500 -1.23077 2.80586e-13
+0.00000 0.843750 -1.18519 1.69980e-17
+0.00000 0.875000 -1.14286 0.00000
+0.00000 0.906250 -1.10357 0.00000
+0.00000 0.937500 -1.06667 0.00000
+0.00000 0.968750 -1.03214 0.00000
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// solves a 2D Poisson equation for linear FE_DGP elements (SIP
+// discretization) with MueLu preconditioner
+
+#include "../tests.h"
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/fe/fe_dgp.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/compressed_sparsity_pattern.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/meshworker/dof_info.h>
+#include <deal.II/meshworker/integration_info.h>
+#include <deal.II/meshworker/assembler.h>
+#include <deal.II/meshworker/loop.h>
+#include <deal.II/integrators/laplace.h>
+
+
+#include <fstream>
+#include <iomanip>
+
+
+template <int dim>
+class MatrixIntegrator : public MeshWorker::LocalIntegrator<dim>
+{
+public:
+ void cell(MeshWorker::DoFInfo<dim> &dinfo,
+ typename MeshWorker::IntegrationInfo<dim> &info) const;
+ void boundary(MeshWorker::DoFInfo<dim> &dinfo,
+ typename MeshWorker::IntegrationInfo<dim> &info) const;
+ void face(MeshWorker::DoFInfo<dim> &dinfo1,
+ MeshWorker::DoFInfo<dim> &dinfo2,
+ typename MeshWorker::IntegrationInfo<dim> &info1,
+ typename MeshWorker::IntegrationInfo<dim> &info2) const;
+};
+
+template <int dim>
+void MatrixIntegrator<dim>
+::cell(MeshWorker::DoFInfo<dim> &dinfo,
+ typename MeshWorker::IntegrationInfo<dim> &info) const
+{
+ LocalIntegrators::Laplace::cell_matrix(dinfo.matrix(0,false).matrix,
+ info.fe_values());
+}
+
+template <int dim>
+void MatrixIntegrator<dim>
+::boundary(MeshWorker::DoFInfo<dim> &dinfo,
+ typename MeshWorker::IntegrationInfo<dim> &info) const
+{
+ const unsigned int deg = info.fe_values(0).get_fe().degree;
+ LocalIntegrators::Laplace
+ ::nitsche_matrix(dinfo.matrix(0,false).matrix, info.fe_values(0),
+ LocalIntegrators::Laplace::
+ compute_penalty(dinfo, dinfo, deg, deg));
+}
+
+template <int dim>
+void MatrixIntegrator<dim>
+::face(MeshWorker::DoFInfo<dim> &dinfo1,
+ MeshWorker::DoFInfo<dim> &dinfo2,
+ typename MeshWorker::IntegrationInfo<dim> &info1,
+ typename MeshWorker::IntegrationInfo<dim> &info2) const
+{
+ const unsigned int deg = info1.fe_values(0).get_fe().degree;
+ LocalIntegrators::Laplace
+ ::ip_matrix(dinfo1.matrix(0,false).matrix, dinfo1.matrix(0,true).matrix,
+ dinfo2.matrix(0,true).matrix, dinfo2.matrix(0,false).matrix,
+ info1.fe_values(0), info2.fe_values(0),
+ LocalIntegrators::Laplace::compute_penalty(dinfo1, dinfo2, deg, deg));
+}
+
+
+template <int dim>
+class Step4
+{
+public:
+ Step4 ();
+ void run ();
+
+private:
+ void make_grid ();
+ void setup_system();
+ void solve ();
+
+ Triangulation<dim> triangulation;
+ FE_DGP<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ TrilinosWrappers::SparseMatrix system_matrix;
+
+ Vector<double> solution;
+ Vector<double> system_rhs;
+};
+
+
+
+
+template <int dim>
+Step4<dim>::Step4 ()
+ :
+ fe (1),
+ dof_handler (triangulation)
+{}
+
+
+template <int dim>
+void Step4<dim>::make_grid ()
+{
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (6);
+}
+
+
+
+template <int dim>
+void Step4<dim>::setup_system ()
+{
+ dof_handler.distribute_dofs (fe);
+
+ CompressedSparsityPattern c_sparsity(dof_handler.n_dofs());
+ DoFTools::make_flux_sparsity_pattern (dof_handler, c_sparsity);
+ system_matrix.reinit (c_sparsity);
+
+ solution.reinit (dof_handler.n_dofs());
+ system_rhs.reinit (dof_handler.n_dofs());
+
+ MappingQ1<dim> mapping;
+ MeshWorker::IntegrationInfoBox<dim> info_box;
+ UpdateFlags update_flags = update_values | update_gradients;
+ info_box.add_update_flags_all(update_flags);
+ info_box.initialize(fe, mapping);
+
+ MeshWorker::DoFInfo<dim> dof_info(dof_handler);
+ MeshWorker::Assembler::MatrixSimple<TrilinosWrappers::SparseMatrix> assembler;
+ assembler.initialize(system_matrix);
+ MatrixIntegrator<dim> integrator;
+ MeshWorker::integration_loop<dim, dim>(dof_handler.begin_active(),
+ dof_handler.end(),
+ dof_info, info_box,
+ integrator, assembler);
+
+ system_matrix.compress(VectorOperation::add);
+
+ for (unsigned int i=0; i<system_rhs.size(); ++i)
+ system_rhs(i) = 0.01*i-0.000001*i*i;
+}
+
+
+
+template <int dim>
+void Step4<dim>::solve ()
+{
+
+ deallog.push(Utilities::int_to_string(dof_handler.n_dofs(),5));
+ TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+ TrilinosWrappers::PreconditionAMGMueLu::AdditionalData data;
+ DoFTools::extract_constant_modes(dof_handler, std::vector<bool>(1,true),
+ data.constant_modes);
+ data.smoother_sweeps = 2;
+ {
+ solution = 0;
+ SolverControl solver_control (1000, 1e-10);
+ SolverCG<> solver (solver_control);
+ preconditioner.initialize(system_matrix, data);
+ solver.solve (system_matrix, solution, system_rhs,
+ preconditioner);
+ }
+ deallog.pop();
+}
+
+
+
+template <int dim>
+void Step4<dim>::run()
+{
+ for (unsigned int cycle = 0; cycle < 2; ++cycle)
+ {
+ if (cycle == 0)
+ make_grid();
+ else
+ triangulation.refine_global(1);
+
+ setup_system();
+ solve();
+ }
+}
+
+
+int main (int argc, char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);
+
+ try
+ {
+ Step4<2> test;
+ test.run();
+ }
+ catch (std::exception &exc)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+
+DEAL:12288:cg::Starting value 1970.22
+DEAL:12288:cg::Convergence step 19 value 0
+DEAL:49152:cg::Starting value 179304.
+DEAL:49152:cg::Convergence step 31 value 0
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// solves a 2D Poisson equation for FE_Q elements using FE_Q_iso_Q1 elements
+// for the MueLu preconditioner. The problem is taken from step-4
+
+#include "../tests.h"
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_q_iso_q1.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/compressed_sparsity_pattern.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+
+#include <fstream>
+#include <iomanip>
+
+
+template <int dim>
+class Step4
+{
+public:
+ Step4 ();
+ void run ();
+
+private:
+ void make_grid ();
+ void setup_system();
+ void assemble_system ();
+ void assemble_preconditioner ();
+ void solve ();
+
+ Triangulation<dim> triangulation;
+ FE_Q<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ FE_Q_iso_Q1<dim> fe_precondition;
+ DoFHandler<dim> dof_handler_precondition;
+
+ ConstraintMatrix constraints;
+
+ TrilinosWrappers::SparseMatrix system_matrix;
+ TrilinosWrappers::SparseMatrix preconditioner_matrix;
+
+ Vector<double> solution;
+ Vector<double> system_rhs;
+};
+
+
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+ RightHandSide () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+ BoundaryValues () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+
+template <int dim>
+double RightHandSide<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ double return_value = 0;
+ for (unsigned int i=0; i<dim; ++i)
+ return_value += 4*std::pow(p(i), 4);
+
+ return return_value;
+}
+
+
+
+template <int dim>
+double BoundaryValues<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ return p.square();
+}
+
+
+
+template <int dim>
+Step4<dim>::Step4 ()
+ :
+ fe (3),
+ dof_handler (triangulation),
+ fe_precondition (3),
+ dof_handler_precondition(triangulation)
+{}
+
+
+template <int dim>
+void Step4<dim>::make_grid ()
+{
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (4);
+}
+
+
+
+template <int dim>
+void Step4<dim>::setup_system ()
+{
+ dof_handler.distribute_dofs (fe);
+ dof_handler_precondition.distribute_dofs (fe_precondition);
+
+ constraints.clear();
+ std::map<unsigned int,double> boundary_values;
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ BoundaryValues<dim>(),
+ constraints);
+ constraints.close();
+
+ CompressedSparsityPattern c_sparsity(dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (dof_handler, c_sparsity, constraints, false);
+ system_matrix.reinit (c_sparsity);
+ preconditioner_matrix.reinit(c_sparsity);
+
+ solution.reinit (dof_handler.n_dofs());
+ system_rhs.reinit (dof_handler.n_dofs());
+}
+
+
+template <int dim>
+void Step4<dim>::assemble_system ()
+{
+ QGauss<dim> quadrature_formula(fe.degree+1);
+
+ const RightHandSide<dim> right_hand_side;
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell!=endc; ++cell)
+ {
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point) *
+ fe_values.JxW (q_point));
+
+ cell_rhs(i) += (fe_values.shape_value (i, q_point) *
+ right_hand_side.value (fe_values.quadrature_point (q_point)) *
+ fe_values.JxW (q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix, cell_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+ system_matrix.compress(VectorOperation::add);
+}
+
+
+
+
+template <int dim>
+void Step4<dim>::assemble_preconditioner ()
+{
+ QIterated<dim> quadrature_formula(QGauss<1>(2), fe.degree);
+
+ const RightHandSide<dim> right_hand_side;
+
+ FEValues<dim> fe_values (fe_precondition, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe_precondition.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler_precondition.begin_active(),
+ endc = dof_handler_precondition.end();
+
+ for (; cell!=endc; ++cell)
+ {
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point) *
+ fe_values.JxW (q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix,
+ local_dof_indices,
+ preconditioner_matrix);
+ }
+ preconditioner_matrix.compress(VectorOperation::add);
+}
+
+
+template <int dim>
+void Step4<dim>::solve ()
+{
+
+ // variant 1: solve with MueLu
+ deallog.push(Utilities::int_to_string(dof_handler.n_dofs(),5));
+ deallog.push("MueLu_Q");
+ {
+ solution = 0;
+ SolverControl solver_control (1000, 1e-12);
+ SolverCG<> solver (solver_control);
+ TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+ preconditioner.initialize(system_matrix);
+ solver.solve (system_matrix, solution, system_rhs,
+ preconditioner);
+ }
+ deallog.pop();
+
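+  // variant 2: the same solve, but the MueLu preconditioner is built on the
+  // auxiliary matrix assembled in assemble_preconditioner()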
+ deallog.push("MueLu_Q_iso_Q1");
+ {
+ solution = 0;
+ SolverControl solver_control (1000, 1e-12);
+ SolverCG<> solver (solver_control);
+ TrilinosWrappers::PreconditionAMGMueLu preconditioner;
+ preconditioner.initialize(preconditioner_matrix);
+ solver.solve (system_matrix, solution, system_rhs,
+ preconditioner);
+ }
+ deallog.pop();
+ deallog << std::endl;
+ deallog.pop();
+}
+
+
+
+template <int dim>
+void Step4<dim>::run()
+{
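+  // solve on two globally refined meshes so that iteration counts are
+  // recorded for two problem sizes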
+ for (unsigned int cycle = 0; cycle < 2; ++cycle)
+ {
+ if (cycle == 0)
+ make_grid();
+ else
+ triangulation.refine_global(1);
+
+ setup_system();
+ assemble_system();
+ assemble_preconditioner();
+ solve();
+ }
+}
+
+
+int main (int argc, char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);
+
+ try
+ {
+ Step4<2> test;
+ test.run();
+ }
+ catch (std::exception &exc)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+
+DEAL:02401:MueLu_Q:cg::Starting value 31.4643
+DEAL:02401:MueLu_Q:cg::Convergence step 45 value 0
+DEAL:02401:MueLu_Q_iso_Q1:cg::Starting value 31.4643
+DEAL:02401:MueLu_Q_iso_Q1:cg::Convergence step 34 value 0
+DEAL:02401::
+DEAL:09409:MueLu_Q:cg::Starting value 44.5271
+DEAL:09409:MueLu_Q:cg::Convergence step 47 value 0
+DEAL:09409:MueLu_Q_iso_Q1:cg::Starting value 44.5271
+DEAL:09409:MueLu_Q_iso_Q1:cg::Convergence step 34 value 0
+DEAL:09409::
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// solves a 2D Poisson equation for linear elements with the MueLu
+// preconditioner and two different smoothers (Chebyshev and symmetric
+// Gauss-Seidel)
+
+#include "../tests.h"
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/compressed_sparsity_pattern.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+
+#include <fstream>
+#include <iomanip>
+
+
+template <int dim>
+class Step4
+{
+public:
+ Step4 ();
+ void run ();
+
+private:
+ void make_grid ();
+ void setup_system();
+ void assemble_system ();
+ void solve ();
+
+ Triangulation<dim> triangulation;
+ FE_Q<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ ConstraintMatrix constraints;
+
+ TrilinosWrappers::SparseMatrix system_matrix;
+
+ Vector<double> solution;
+ Vector<double> system_rhs;
+};
+
+
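+// Right hand side and boundary values, taken over from the step-4 tutorial
+// program.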
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+ RightHandSide () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+ BoundaryValues () : Function<dim>() {}
+
+ virtual double value (const Point<dim> &p,
+ const unsigned int component = 0) const;
+};
+
+
+
+
+template <int dim>
+double RightHandSide<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ double return_value = 0;
+ for (unsigned int i=0; i<dim; ++i)
+ return_value += 4*std::pow(p(i), 4);
+
+ return return_value;
+}
+
+
+
+template <int dim>
+double BoundaryValues<dim>::value (const Point<dim> &p,
+ const unsigned int /*component*/) const
+{
+ return p.square();
+}
+
+
+
+template <int dim>
+Step4<dim>::Step4 ()
+ :
+ fe (1),
+ dof_handler (triangulation)
+{}
+
+
+template <int dim>
+void Step4<dim>::make_grid ()
+{
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (6);
+}
+
+
+
+template <int dim>
+void Step4<dim>::setup_system ()
+{
+ dof_handler.distribute_dofs (fe);
+
+ constraints.clear();
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ BoundaryValues<dim>(),
+ constraints);
+ constraints.close();
+
+ CompressedSparsityPattern c_sparsity(dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (dof_handler, c_sparsity, constraints, false);
+ system_matrix.reinit (c_sparsity);
+
+ solution.reinit (dof_handler.n_dofs());
+ system_rhs.reinit (dof_handler.n_dofs());
+}
+
+
+template <int dim>
+void Step4<dim>::assemble_system ()
+{
+ QGauss<dim> quadrature_formula(fe.degree+1);
+
+ const RightHandSide<dim> right_hand_side;
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell!=endc; ++cell)
+ {
+ fe_values.reinit (cell);
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point) *
+ fe_values.JxW (q_point));
+
+ cell_rhs(i) += (fe_values.shape_value (i, q_point) *
+ right_hand_side.value (fe_values.quadrature_point (q_point)) *
+ fe_values.JxW (q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global(cell_matrix, cell_rhs,
+ local_dof_indices,
+ system_matrix, system_rhs);
+ }
+ system_matrix.compress(VectorOperation::add);
+}
+
+
+
+template <int dim>
+void Step4<dim>::solve ()
+{
+
+  // variant 1: MueLu preconditioner with a Chebyshev smoother
+ deallog.push(Utilities::int_to_string(dof_handler.n_dofs(),5));
+ deallog.push("Chebyshev");
+ TrilinosWrappers::PreconditionAMGMueLu preconditioner;
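+  // The AdditionalData settings select the smoother and coarse solver and
+  // set the aggregation threshold; the same object is reused below with a
+  // different smoother.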
+ TrilinosWrappers::PreconditionAMGMueLu::AdditionalData data;
+ data.coarse_type = "Amesos-KLU";
+ data.smoother_type = "Chebyshev";
+ data.aggregation_threshold = 1e-3;
+ data.smoother_sweeps = 3;
+ {
+ solution = 0;
+ SolverControl solver_control (1000, 1e-10);
+ SolverCG<> solver (solver_control);
+ preconditioner.initialize(system_matrix, data);
+ solver.solve (system_matrix, solution, system_rhs,
+ preconditioner);
+ }
+ deallog.pop();
+
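+  // variant 2: the same solve, now with a symmetric Gauss-Seidel smoother
+  // and two sweeps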
+ deallog.push("SGS");
+ data.smoother_type = "symmetric Gauss-Seidel";
+ data.smoother_sweeps = 2;
+ {
+ solution = 0;
+ SolverControl solver_control (1000, 1e-12);
+ SolverCG<> solver (solver_control);
+ preconditioner.initialize(system_matrix, data);
+ solver.solve (system_matrix, solution, system_rhs,
+ preconditioner);
+ }
+ deallog.pop();
+ deallog.pop();
+}
+
+
+
+template <int dim>
+void Step4<dim>::run()
+{
+ for (unsigned int cycle = 0; cycle < 2; ++cycle)
+ {
+ if (cycle == 0)
+ make_grid();
+ else
+ triangulation.refine_global(1);
+
+ setup_system();
+ assemble_system();
+ solve();
+ }
+}
+
+
+int main (int argc, char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);
+
+ try
+ {
+ Step4<2> test;
+ test.run();
+ }
+ catch (std::exception &exc)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ deallog << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ deallog << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+
+DEAL:04225:Chebyshev:cg::Starting value 21.8299
+DEAL:04225:Chebyshev:cg::Convergence step 10 value 0
+DEAL:04225:SGS:cg::Starting value 21.8299
+DEAL:04225:SGS:cg::Convergence step 10 value 0
+DEAL:16641:Chebyshev:cg::Starting value 30.8770
+DEAL:16641:Chebyshev:cg::Convergence step 8 value 0
+DEAL:16641:SGS:cg::Starting value 30.8770
+DEAL:16641:SGS:cg::Convergence step 8 value 0