@@ -866,6 +866,10 @@ cmake -DCMAKE_C_COMPILER="mpicc" -DCMAKE_CXX_COMPILER="mpicxx" -DCMAKE_Fortran_C
DEAL_II_CXX_FLAGS - used during all builds
DEAL_II_CXX_FLAGS_DEBUG - additional flags for the debug library
DEAL_II_CXX_FLAGS_RELEASE - additional flags for the release library
+
+DEAL_II_LINKER_FLAGS - used during all builds
+DEAL_II_LINKER_FLAGS_DEBUG - additional flags for the debug library
+DEAL_II_LINKER_FLAGS_RELEASE - additional flags for the release library
The content of the cached variables will be preserved
@@ -974,18 +978,6 @@ $ cmake -C config.sample <...>
configuration.
-
- The build system automatically exports a config.camke
- into the binary directory that can be used to conveniently clone a
- build directories cache entries, e.g. assuming that there is a
- configured build directory build next to an empty
- directory build2:
-
-$ cd build2
-$ cmake -C ../build/config.cmake ../deal.II
-
-
-
Compiling only certain parts
diff --git a/deal.II/doc/users/config.sample b/deal.II/doc/users/config.sample
index a64c3dcbb4..b37132c370 100644
--- a/deal.II/doc/users/config.sample
+++ b/deal.II/doc/users/config.sample
@@ -1,37 +1,9 @@
-## ---------------------------------------------------------------------
-## $Id$
-##
-## Copyright (C) 2013 by the deal.II Authors
-##
-## This file is part of the deal.II library.
-##
-## The deal.II library is free software; you can use it, redistribute
-## it, and/or modify it under the terms of the GNU Lesser General
-## Public License as published by the Free Software Foundation; either
-## version 2.1 of the License, or (at your option) any later version.
-## The full text of the license can be found in the file LICENSE at
-## the top level of the deal.II distribution.
-##
-## ---------------------------------------------------------------------
-
-#
-# Example configuration file
-#
-# This file can be used as a script to preseed the initial cache of a build
-# directory. To do so, invoke cmake with
-#
-# $ cmake -C configuration.cmake [...]
-#
-#
-# Please note that this file is actually intrepeted as a full featured
-# CMake script file, so in order to set variables in the cache a statement
-# of the form
-#
-# SET( CACHE "")
-#
-# has to be used. See doc/readme.html and doc/users/cmake.html for further
-# details on how to use the cmake build system of deal.II.
-#
+## ##
+# Example configuration file #
+# #
+# See doc/readme.html and doc/users/cmake.html for further #
+# details on how to use the cmake build system of deal.II. #
+## ##
###########################################################################
@@ -119,8 +91,8 @@
# "Fortran Compiler."
# )
#
-# SET(DEAL_II_CXX_FLAGS "" CACHE STRING
-# "The user supplied cache variable will be appended _at the end_ of the auto generated DEAL_II_CXX_FLAGS variable"
+# SET(CMAKE_CXX_FLAGS "" CACHE STRING
+# "The user supplied cache variable will be appended _at the end_ of the auto generated CMAKE_CXX_FLAGS variable"
# )
#
# SET(DEAL_II_CXX_FLAGS_DEBUG "" CACHE STRING
@@ -143,18 +115,6 @@
# "The user supplied cache variable will be appended _at the end_ of the auto generated DEAL_II_LINKER_FLAGS_RELEASE variable"
# )
#
-# SET(DEAL_II_DEFINITIONS "" CACHE STRING
-# "Additional, user supplied compile definitions"
-# )
-#
-# SET(DEAL_II_DEFINITIONS_DEBUG "" CACHE STRING
-# "Additional, user supplied compile definitions"
-# )
-#
-# SET(DEAL_II_DEFINITIONS_RELEASE "" CACHE STRING
-# "Additional, user supplied compile definitions"
-# )
-#
# SET(BUILD_SHARED_LIBS "ON" CACHE BOOL
# "Build a shared library"
# )
@@ -175,6 +135,7 @@
# "If set to ON, then use 64-bit data types to represent global degree of freedom indices. The default is to OFF. You only want to set this to ON if you will solve problems with more than 2^31 (approximately 2 billion) unknowns. If set to ON, you also need to ensure that both Trilinos and/or PETSc support 64-bit indices."
# )
#
+#
###########################################################################
@@ -415,6 +376,25 @@
#
+#
+# muParser:
+#
+# SET(DEAL_II_WITH_MUPARSER ON CACHE BOOL
+# "Build deal.II with support for muparser"
+#
+# Automatic detection:
+#
+# Specify a hint with CMAKE_PREFIX_PATH or by setting
+# SET(MUPARSER_DIR "/.../..." CACHE PATH "")
+#
+# Manual setup:
+#
+# SET(MUPARSER_FOUND TRUE CACHE BOOL "")
+# SET(MUPARSER_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "")
+# SET(MUPARSER_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "")
+#
+
+
#
# Netcdf:
#
@@ -632,11 +612,12 @@
# C++11 support is autodetected. You can explicitly disable C++11 support by
# specifying
#
-# SET(DEAL_II_WITH_CXX11 FALSE CACHE BOOL "")
+# SET(DEAL_II_HAVE_CXX11_FLAG FALSE CACHE BOOL "")
#
# A custom C++11 flag can be set by setting
#
-# SET(DEAL_II_CXX11_FLAG "-std=c++11" CACHE STRING "")
+# SET(DEAL_II_HAVE_CXX11_FLAG TRUE CACHE BOOL "")
+# SET(DEAL_II_CXX11_FLAG "-std=c++0x" CACHE STRING "")
#
@@ -674,7 +655,7 @@
# "Library suffix for the debug library"
# )
#
-# SET(DEAL_II_RELEASE_SUFFIX "" CACHE STRING
+# SET_IF_EMPTY(DEAL_II_RELEASE_SUFFIX "" CACHE STRING
# "Library suffix for the release library"
# )
#
diff --git a/deal.II/examples/step-15/doc/intro.dox b/deal.II/examples/step-15/doc/intro.dox
index 2cebf0f9f1..099982af18 100644
--- a/deal.II/examples/step-15/doc/intro.dox
+++ b/deal.II/examples/step-15/doc/intro.dox
@@ -47,7 +47,7 @@ $\Omega$ is the domain we get by projecting the wire's positions into $x-y$
space. In this example, we choose $\Omega$ as the unit disk.
As described above, we solve this equation using Newton's method in which we
-compute the $n$th approximate solution from the $n-1$st one, and use
+compute the $n$th approximate solution from the $(n-1)$st one, and use
a damping parameter $\alpha^n$ to get better global convergence behavior:
@f{align*}
F'(u^{n},\delta u^{n})&=- F(u^{n})
diff --git a/deal.II/examples/step-16/step-16.cc b/deal.II/examples/step-16/step-16.cc
index 1eb7c93fac..e8a2035f0b 100644
--- a/deal.II/examples/step-16/step-16.cc
+++ b/deal.II/examples/step-16/step-16.cc
@@ -62,15 +62,15 @@
#include
#include
-// These, now, are the include necessary for the multilevel methods. The
-// first two declare classes that allow us to enumerate degrees of freedom not
-// only on the finest mesh level, but also on intermediate levels (that's what
-// the MGDoFHandler class does) as well as allow to access this information
-// (iterators and accessors over these cells).
+// These, now, are the include files necessary for the multilevel methods. The first
+// one declares how to handle Dirichlet boundary conditions on each of the
+// levels of the multigrid method. For the actual description of the degrees
+// of freedom, we do not need any new include file because DoFHandler already
+// has all necessary methods implemented. We will only need to distribute the
+// DoFs for the levels further down.
//
// The rest of the include files deals with the mechanics of multigrid as a
// linear operator (solver or preconditioner).
-#include
#include
#include
#include
@@ -112,7 +112,7 @@ namespace Step16
Triangulation triangulation;
FE_Q fe;
- MGDoFHandler mg_dof_handler;
+ DoFHandler dof_handler;
SparsityPattern sparsity_pattern;
SparseMatrix system_matrix;
@@ -234,7 +234,7 @@ namespace Step16
triangulation (Triangulation::
limit_level_difference_at_vertices),
fe (degree),
- mg_dof_handler (triangulation),
+ dof_handler (triangulation),
degree(degree)
{}
@@ -243,29 +243,31 @@ namespace Step16
// @sect4{LaplaceProblem::setup_system}
// The following function extends what the corresponding one in step-6
- // did. The top part, apart from the additional output, does the same:
+ // did, with the exception of also distributing the degrees of freedom on
+ // each level of the mesh, which is needed for the multigrid hierarchy.
template
void LaplaceProblem::setup_system ()
{
- mg_dof_handler.distribute_dofs (fe);
+ dof_handler.distribute_dofs (fe);
+ dof_handler.distribute_mg_dofs (fe);
// Here we output not only the degrees of freedom on the finest level, but
// also in the multilevel structure
deallog << "Number of degrees of freedom: "
- << mg_dof_handler.n_dofs();
+ << dof_handler.n_dofs();
for (unsigned int l=0; l::type dirichlet_boundary_functions;
ZeroFunction homogeneous_dirichlet_bc (1);
dirichlet_boundary_functions[0] = &homogeneous_dirichlet_bc;
- VectorTools::interpolate_boundary_values (static_cast&>(mg_dof_handler),
+ VectorTools::interpolate_boundary_values (static_cast&>(dof_handler),
dirichlet_boundary_functions,
constraints);
constraints.close ();
@@ -299,7 +301,7 @@ namespace Step16
// about the boundary values as well, so we pass the
// dirichlet_boundary here as well.
mg_constrained_dofs.clear();
- mg_constrained_dofs.initialize(mg_dof_handler, dirichlet_boundary_functions);
+ mg_constrained_dofs.initialize(dof_handler, dirichlet_boundary_functions);
// Now for the things that concern the multigrid data structures. First,
@@ -337,9 +339,9 @@ namespace Step16
for (unsigned int level=0; level coefficient;
std::vector coefficient_values (n_q_points);
- typename MGDoFHandler::active_cell_iterator
- cell = mg_dof_handler.begin_active(),
- endc = mg_dof_handler.end();
+ typename DoFHandler::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
cell_matrix = 0;
@@ -455,44 +453,39 @@ namespace Step16
// obscure if you're not familiar with the algorithm actually implemented
// in deal.II to support multilevel algorithms on adaptive meshes; if some
// of the things below seem strange, take a look at the @ref mg_paper.
- //
- // Our first job is to identify those degrees of freedom on each level
- // that are located on interfaces between adaptively refined levels, and
- // those that lie on the interface but also on the exterior boundary of
- // the domain. As in many other parts of the library, we do this by using
- // Boolean masks, i.e. vectors of Booleans each element of which indicates
- // whether the corresponding degree of freedom index is an interface DoF
- // or not. The MGConstraints already computed the information
- // for us when we called initialize in setup_system().
- std::vector > interface_dofs
- = mg_constrained_dofs.get_refinement_edge_indices ();
- std::vector > boundary_interface_dofs
- = mg_constrained_dofs.get_refinement_edge_boundary_indices ();
-
- // The indices just identified will later be used to decide where the
- // assembled value has to be added into on each level. On the other hand,
- // we also have to impose zero boundary conditions on the external
- // boundary of each level. But this the MGConstraints knows.
- // So we simply ask for them by calling get_boundary_indices().
- // The third step is to construct constraints on all those
- // degrees of freedom: their value should be zero after each application
- // of the level operators. To this end, we construct ConstraintMatrix
- // objects for each level, and add to each of these constraints for each
- // degree of freedom. Due to the way the ConstraintMatrix stores its data,
- // the function to add a constraint on a single degree of freedom and
- // force it to be zero is called Constraintmatrix::add_line(); doing so
- // for several degrees of freedom at once can be done using
+
+ // Our first job is to identify the boundary conditions for the levels. On
+ // each level, we impose Dirichlet boundary conditions on the exterior
+ // boundary of the domain as well as on interfaces between adaptively
+ // refined levels. As in many other parts of the library, we do this by
+ // using a mask described by an IndexSet. The MGConstraints
+ // already computed the information for us when we called initialize in
+ // setup_system(). So we simply ask for them by calling
+ // get_boundary_indices() and
+ // get_refinement_edge_indices(level). Moreover, we have to
+ // identify the subset of the refinement edge indices which are also
+ // located on the boundary as they require special treatment in the
+ // algorithm further down.
+
+ // These three masks are used to fill a ConstraintMatrix object for each
+ // level that we use during the assembly of the matrix: the value of the
+ // associated degrees of freedom should be zero after each application of
+ // the level operators. Due to the way the ConstraintMatrix stores its
+ // data, the function to add a constraint on a single degree of freedom
+ // and force it to be zero is called ConstraintMatrix::add_line(); doing
+ // so for several degrees of freedom at once can be done using
// Constraintmatrix::add_lines():
std::vector boundary_constraints (triangulation.n_levels());
std::vector boundary_interface_constraints (triangulation.n_levels());
for (unsigned int level=0; levelassemble_system, with two exceptions: (i) we don't need a
// right hand side, and more significantly (ii) we don't just loop over
// all active cells, but in fact all cells, active or not. Consequently,
- // the correct iterator to use is MGDoFHandler::cell_iterator rather than
- // MGDoFHandler::active_cell_iterator. Let's go about it:
- typename MGDoFHandler::cell_iterator cell = mg_dof_handler.begin(),
- endc = mg_dof_handler.end();
+ // the correct iterator to use is DoFHandler::cell_iterator rather than
+ // DoFHandler::active_cell_iterator. Let's go about it:
+ typename DoFHandler::cell_iterator cell = dof_handler.begin(),
+ endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
@@ -526,8 +519,8 @@ namespace Step16
// with a gotcha that is easily forgotten: The indices of global
// degrees of freedom we want here are the ones for current level, not
// for the global matrix. We therefore need the function
- // MGDoFAccessor::get_mg_dof_indices, not
- // MGDoFAccessor::get_dof_indices as used in the assembly of the
+ // DoFAccessor::get_mg_dof_indices, not
+ // DoFAccessor::get_dof_indices as used in the assembly of the
// global system:
cell->get_mg_dof_indices (local_dof_indices);
@@ -554,7 +547,7 @@ namespace Step16
// we have a symmetric operator, one of these matrices is the
// transpose of the other.
//
- // The way we assemble these matrices is as follows: since the are
+ // The way we assemble these matrices is as follows: since they are
// formed from parts of the local contributions, we first delete all
// those parts of the local contributions that we are not interested
// in, namely all those elements of the local matrix for which not $i$
@@ -572,8 +565,10 @@ namespace Step16
// necessary.
for (unsigned int i=0; ilevel()][local_dof_indices[i]]==true &&
- interface_dofs[cell->level()][local_dof_indices[j]]==false))
+ if ( !(mg_constrained_dofs.get_refinement_edge_indices(cell->level()).
+ is_element(local_dof_indices[i])==true &&
+ mg_constrained_dofs.get_refinement_edge_indices(cell->level()).
+ is_element(local_dof_indices[j])==false))
cell_matrix(i,j) = 0;
boundary_interface_constraints[cell->level()]
@@ -597,8 +592,11 @@ namespace Step16
// the finite element function spaces involved and can often be computed in
// a generic way independent of the problem under consideration. In that
// case, we can use the MGTransferPrebuilt class that, given the constraints
- // on the global level and an MGDoFHandler object computes the matrices
- // corresponding to these transfer operators.
+ // of the final linear system and the MGConstrainedDoFs object that knows
+ // about the boundary conditions on each level and the degrees of freedom
+ // on interfaces between different refinement levels, can build the
+ // matrices for those transfer operations from a DoFHandler object with
+ // level degrees of freedom.
//
// The second part of the following lines deals with the coarse grid
// solver. Since our coarse grid is very coarse indeed, we decide for a
@@ -609,15 +607,8 @@ namespace Step16
template
void LaplaceProblem::solve ()
{
-
- // Create the object that deals with the transfer between different
- // refinement levels. We need to pass it the hanging node constraints.
MGTransferPrebuilt > mg_transfer(hanging_node_constraints, mg_constrained_dofs);
- // Now the prolongation matrix has to be built. This matrix needs to take
- // the boundary values on each level into account and needs to know about
- // the indices at the refinement edges. The MGConstraints
- // knows about that so pass it as an argument.
- mg_transfer.build_matrices(mg_dof_handler);
+ mg_transfer.build_matrices(dof_handler);
FullMatrix coarse_matrix;
coarse_matrix.copy_from (mg_matrices[0]);
@@ -668,13 +659,13 @@ namespace Step16
// the transpose operator for the latter operation, allowing us to
// initialize both up and down versions of the operator with the matrices
// we already built:
- MGMatrix<> mg_matrix(&mg_matrices);
- MGMatrix<> mg_interface_up(&mg_interface_matrices);
- MGMatrix<> mg_interface_down(&mg_interface_matrices);
+ mg::Matrix > mg_matrix(mg_matrices);
+ mg::Matrix > mg_interface_up(mg_interface_matrices);
+ mg::Matrix > mg_interface_down(mg_interface_matrices);
// Now, we are ready to set up the V-cycle operator and the multilevel
// preconditioner.
- Multigrid > mg(mg_dof_handler,
+ Multigrid > mg(dof_handler,
mg_matrix,
coarse_grid_solver,
mg_transfer,
@@ -683,7 +674,7 @@ namespace Step16
mg.set_edge_matrices(mg_interface_down, mg_interface_up);
PreconditionMG, MGTransferPrebuilt > >
- preconditioner(mg_dof_handler, mg, mg_transfer);
+ preconditioner(dof_handler, mg, mg_transfer);
// With all this together, we can finally get about solving the linear
// system in the usual way:
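
The preconditioner object constructed here is then used like any other deal.II preconditioner. A minimal sketch of the remainder of the solve, assuming the member names used throughout this tutorial (system_matrix, solution, system_rhs, constraints):

@code
  SolverControl solver_control (1000, 1e-12);
  SolverCG<>    solver (solver_control);

  // Solve the global system with the geometric multigrid preconditioner
  // assembled above, then set the constrained degrees of freedom to their
  // correct values.
  solver.solve (system_matrix, solution, system_rhs, preconditioner);
  constraints.distribute (solution);
@endcode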
@@ -709,9 +700,7 @@ namespace Step16
// computed. In particular, the first one refines the mesh at the beginning
// of each cycle while the second one outputs results at the end of each
// such cycle. The functions are almost unchanged from those in step-6, with
- // the exception of two minor differences: The KellyErrorEstimator::estimate
- // function wants an argument of type DoFHandler, not MGDoFHandler, and so
- // we have to cast from derived to base class; and we generate output in VTK
+ // the exception of one minor difference: we generate output in VTK
// format, to use the more modern visualization programs available today
// compared to those that were available when step-6 was written.
template
@@ -719,7 +708,7 @@ namespace Step16
{
Vector estimated_error_per_cell (triangulation.n_active_cells());
- KellyErrorEstimator::estimate (static_cast&>(mg_dof_handler),
+ KellyErrorEstimator::estimate (dof_handler,
QGauss(3),
typename FunctionMap::type(),
solution,
@@ -737,7 +726,7 @@ namespace Step16
{
DataOut data_out;
- data_out.attach_dof_handler (mg_dof_handler);
+ data_out.attach_dof_handler (dof_handler);
data_out.add_data_vector (solution, "solution");
data_out.build_patches ();
@@ -784,10 +773,10 @@ namespace Step16
setup_system ();
std::cout << " Number of degrees of freedom: "
- << mg_dof_handler.n_dofs()
+ << dof_handler.n_dofs()
<< " (by level: ";
for (unsigned int level=0; levelfalse argument we pass when creating the sparsity
-pattern. It tells the sparsity pattern that constrained entries should not
-be inserted into the sparsity pattern. This makes sense for the way we're
-going to do assembly, which does the elimination of constraints already
-locally. Hence, we will never write the constrained entries. This is the
-second key to increase this program's performance, since the system matrix
-will have less entries — this saves both memory and computing time
-when doing matrix-vector multiplications or applying a preconditioner.
-
-In a similar vein, we have to slightly modify the way we copy local
-contributions into global matrices and vectors. In previous tutorial programs,
-we have always used a process like this:
-@code
- typename hp::DoFHandler::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
- for (; cell!=endc; ++cell)
- {
- ... // assemble local contributions them into global object
-
- cell->get_dof_indices (local_dof_indices);
- for (unsigned int i=0; i::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
- for (; cell!=endc; ++cell)
- {
- ... // assemble local contributions them into global object
-
- cell->get_dof_indices (local_dof_indices);
-
- hanging_node_constraints
- .distribute_local_to_global (cell_matrix, cell_rhs,
- local_dof_indices,
- system_matrix, system_rhs);
- }
-@endcode
-Essentially, what the
-hanging_node_constraints.distribute_local_to_global call does is
-to implement the same loops as before, but whenever we hit a constrained degree
-of freedom, the function does the right thing and already condenses it away.
-
-Using this technique of eliminating constrained nodes already when
-transferring local contributions into the global objects, we avoid the problem
-of having to go back later and change these objects. Timing these operations
-shows that this makes the overall algorithms faster.
+It turns out that the strategy presented first in step-6 to eliminate the
+constraints while computing the element matrices and vectors with
+ConstraintMatrix::distribute_local_to_global is the most efficient approach
+also for this case. The alternative strategy of first building the matrix
+without constraints and then "condensing" away constrained degrees of freedom
+is considerably more expensive: building the sparsity pattern with this
+inefficient algorithm requires at least ${\cal O}(N \log N)$ operations in
+the number of unknowns, whereas an ideal finite element program would of
+course only have algorithms that are linear in the number of unknowns.
+Timing the sparsity pattern creation as well as the matrix assembly shows
+that the algorithm presented in step-6 (and used in the code below) is
+indeed faster.
In our program, we will also treat the boundary conditions as (possibly
inhomogeneous) constraints and eliminate the matrix rows and columns to
@@ -783,7 +699,7 @@ those as well. All we have to do for this is to call the function that
interpolates the Dirichlet boundary conditions already in the setup phase in
order to tell the ConstraintMatrix object about them, and then do the
transfer from local to global data on matrix and vector simultaneously. This
-is exactly what we've shown in the step-22 tutorial program.
+is exactly what we've shown in step-6.
The test case
diff --git a/deal.II/examples/step-27/step-27.cc b/deal.II/examples/step-27/step-27.cc
index 1a4beaca1f..2883bc2841 100644
--- a/deal.II/examples/step-27/step-27.cc
+++ b/deal.II/examples/step-27/step-27.cc
@@ -192,26 +192,10 @@ namespace Step27
// @sect4{LaplaceProblem::setup_system}
//
- // This function is again an almost verbatim copy of what we already did in
- // step-6. The first change is that we append the Dirichlet boundary
- // conditions to the ConstraintMatrix object, which we consequently call
- // just constraints instead of
- // hanging_node_constraints. The second difference is that we
- // don't directly build the sparsity pattern, but first create an
- // intermediate object that we later copy into the usual SparsityPattern
- // data structure, since this is more efficient for the problem with many
- // entries per row (and different number of entries in different rows). In
- // another slight deviation, we do not first build the sparsity pattern and
- // then condense away constrained degrees of freedom, but pass the
- // constraint matrix object directly to the function that builds the
- // sparsity pattern. We disable the insertion of constrained entries with
- // false as fourth argument in the DoFTools::make_sparsity_pattern
- // function. All of these changes are explained in the introduction of this
- // program.
- //
- // The last change, maybe hidden in plain sight, is that the dof_handler
- // variable here is an hp object -- nevertheless all the function calls we
- // had before still work in exactly the same way as they always did.
+ // This function is again a verbatim copy of what we already did in
+ // step-6. Despite function calls with exactly the same names and arguments,
+ // the algorithms used internally are different in some aspects since the
+ // dof_handler variable here is an hp object.
template
void LaplaceProblem::setup_system ()
{
@@ -324,12 +308,6 @@ namespace Step27
local_dof_indices,
system_matrix, system_rhs);
}
-
- // Now with the loop over all cells finished, we are done for this
- // function. The steps we still had to do at this point in earlier
- // tutorial programs, namely condensing hanging node constraints and
- // applying Dirichlet boundary conditions, have been taken care of by the
- // ConstraintMatrix object constraints on the fly.
}
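
Related to the sparsity pattern discussion in the introduction above, here is a minimal sketch of building the pattern with the constraints known up front, assuming the class names of this deal.II generation (CompressedSimpleSparsityPattern; later releases provide the same functionality under the name DynamicSparsityPattern):

@code
CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
                                     dof_handler.n_dofs());

// Passing the ConstraintMatrix and 'false' keeps constrained entries out of
// the pattern, since assembly eliminates them locally anyway.
DoFTools::make_sparsity_pattern (dof_handler, csp,
                                 constraints,
                                 /*keep_constrained_dofs=*/ false);

sparsity_pattern.copy_from (csp);
system_matrix.reinit (sparsity_pattern);
@endcode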
diff --git a/deal.II/examples/step-52/CMakeLists.txt b/deal.II/examples/step-52/CMakeLists.txt
new file mode 100644
index 0000000000..ec29e873c4
--- /dev/null
+++ b/deal.II/examples/step-52/CMakeLists.txt
@@ -0,0 +1,31 @@
+##
+# CMake script for the step-52 tutorial program:
+##
+
+# Set the name of the project and target:
+SET(TARGET "step-52")
+
+# Declare all source files the target consists of:
+SET(TARGET_SRC
+ ${TARGET}.cc
+ # You can specify additional files here!
+ )
+
+# Usually, you will not need to modify anything beyond this point...
+
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8)
+
+FIND_PACKAGE(deal.II 8.0 QUIET
+ HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR}
+ )
+IF(NOT ${deal.II_FOUND})
+ MESSAGE(FATAL_ERROR "\n"
+ "*** Could not locate deal.II. ***\n\n"
+ "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n"
+ "or set an environment variable \"DEAL_II_DIR\" that contains this path."
+ )
+ENDIF()
+
+DEAL_II_INITIALIZE_CACHED_VARIABLES()
+PROJECT(${TARGET})
+DEAL_II_INVOKE_AUTOPILOT()
diff --git a/deal.II/examples/step-52/doc/builds-on b/deal.II/examples/step-52/doc/builds-on
new file mode 100644
index 0000000000..48a0f73876
--- /dev/null
+++ b/deal.II/examples/step-52/doc/builds-on
@@ -0,0 +1 @@
+step-4
diff --git a/deal.II/examples/step-52/doc/intro.dox b/deal.II/examples/step-52/doc/intro.dox
new file mode 100644
index 0000000000..4ae88a593d
--- /dev/null
+++ b/deal.II/examples/step-52/doc/intro.dox
@@ -0,0 +1,102 @@
+
+
+This program was contributed by Bruno Turcksin and Damien Lebrun-Grandie.
+
+
+Introduction
+
+This program shows how to use Runge-Kutta methods to solve a time-dependent
+problem.
+
+Problem statement
+
+In this example, we solve the energy-integrated time-dependent diffusion
+approximation of the neutron transport equation (see step-28 for the
+time-independent multigroup diffusion). We assume that the medium is not
+fissile and, therefore, the neutron flux satisfies the following equation:
+@f{eqnarray*}
+\frac{1}{v}\frac{\partial \phi(x,t)}{\partial t} = \nabla \cdot D(x) \nabla \phi(x,t)
+- \Sigma_a(x) \phi(x,t) + S(x,t)
+@f}
+augmented by appropriate boundary conditions. Here, $v$ is the velocity of
+neutrons, $D$ is the diffusion coefficient, $\Sigma_a$ is the absorption
+cross section, and $S$ is a source. Because we are only interested in the
+time dependence, we assume that $D$ and $\Sigma_a$ are constant. In this
+example, we are only interested in the error in time and thus, we are looking
+for a solution of the form:
+@f{eqnarray*}
+\phi(x,t) = A\sin(\omega t)(bx-x^2).
+@f}
+By using quadratic finite elements, we will not have any spatial error. We
+impose the following boundary conditions: homogeneous Dirichlet for $x=0$ and
+$x=b$ and homogeneous Neumann conditions for $y=0$ and $y=b$. The source is
+given by:
+@f{eqnarray*}
+S=A\left(\frac{1}{v}\omega \cos(\omega t)(bx -x^2) + \sin(\omega t)
+\left(\Sigma_a (bx-x^2)+2D\right) \right).
+@f}
+Because the solution is a sine, we know that
+$\phi\left(x,\frac{\pi}{\omega}\right) = 0$. Therefore, we can easily
+compute the error at this time since it is simply the norm of the solution
+found.
+
+Runge-Kutta
+
+The Runge-Kutta methods implemented in deal.II assume that the equation to be
+solved can be written as:
+@f{eqnarray*}
+\frac{dy}{dt} = f(t,y).
+@f}
+When using finite elements, the previous equation becomes:
+@f{eqnarray*}
+M\frac{dy}{dt} = f(t,y),
+@f}
+where $M$ is the mass matrix. Therefore, we have:
+@f{eqnarray*}
+\frac{dy}{dt} = M^{-1}f(t,y).
+@f}
+Runge-Kutta methods can be written as:
+@f{eqnarray*}
+y_{n+1} = y_n + \sum_{i=1}^s b_i k_i
+@f}
+where
+@f{eqnarray*}
+k_i = h M^{-1} f(t_n+c_ih,y_n+\sum_{j=1}^sa_{ij}k_j)
+@f}
+where $a_{ij}$, $b_i$, and $c_i$ are known coefficients and $h$ is the time step
+used. The methods currently implemented in deal.II can be divided into three
+categories:
+
+- explicit Runge-Kutta
+- embedded (or adaptive) Runge-Kutta
+- implicit Runge-Kutta
+
+Explicit Runge-Kutta
+These methods, which include forward Euler, third order Runge-Kutta, and
+fourth order Runge-Kutta, require a function to evaluate $M^{-1}f(t,y)$. These
+methods become unstable when the time step chosen is too large.
+
+Embedded Runge-Kutta
+These methods include Heun-Euler, Bogacki-Shampine, Dormand-Prince (ode45 in
+Matlab), Fehlberg, and Cash-Karp. These methods use a low order method to
+estimate the error and decide if the time step needs to be refined or can be
+coarsened. Only embedded explicit methods have been implemented so far.
+
+Implicit Runge-Kutta
+These methods include backward Euler, implicit midpoint, Crank-Nicolson, and the
+two-stage SDIRK. These methods require evaluating $M^{-1}f(t,y)$ and
+$\left(I-\Delta t M^{-1} \frac{\partial f}{\partial y}\right)^{-1} = \left(M - \Delta
+t \frac{\partial f}{\partial y}\right)^{-1} M$. These methods are always stable.
+
+Remarks
+To simplify things, we solve the problem in two dimensions and the mesh is
+uniform (there is no need to adapt the mesh since we use quadratic finite
+elements and the exact solution is quadratic). Going from a two dimensional
+domain to a three dimensional domain is not very challenging. However, if the
+mesh must be adapted, we cannot forget to:
+
+- project the solution to the new mesh when the mesh is changed. The mesh
+used should be the same at the beginning and at the end of the time step.
+- update the mass matrix and its inverse.
+
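
For illustration, the simplest member of the explicit and of the implicit family each use a single stage $s=1$: forward Euler corresponds to $a_{11}=0$, $b_1=1$, $c_1=0$, and backward Euler to $a_{11}=1$, $b_1=1$, $c_1=1$, so that in the notation introduced above
@f{eqnarray*}
\mbox{forward Euler:}  && k_1 = h M^{-1} f(t_n, y_n), \qquad y_{n+1} = y_n + k_1, \\
\mbox{backward Euler:} && k_1 = h M^{-1} f(t_n + h, y_n + k_1), \qquad y_{n+1} = y_n + k_1.
@f}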
diff --git a/deal.II/examples/step-52/doc/kind b/deal.II/examples/step-52/doc/kind
new file mode 100644
index 0000000000..86a44aa1ef
--- /dev/null
+++ b/deal.II/examples/step-52/doc/kind
@@ -0,0 +1 @@
+time dependent
diff --git a/deal.II/examples/step-52/doc/results.dox b/deal.II/examples/step-52/doc/results.dox
new file mode 100644
index 0000000000..4a70bbe4bc
--- /dev/null
+++ b/deal.II/examples/step-52/doc/results.dox
@@ -0,0 +1,30 @@
+Results
+
+The output of this program consists of the console output and solutions given in
+vtu format.
+
+The console output is:
+@code
+Forward Euler error: 1.00883
+Third order Runge-Kutta error: 0.000227982
+Fourth order Runge-Kutta error: 1.90541e-06
+Backward Euler error: 1.03428
+Implicit Midpoint error: 0.00862702
+Crank-Nicolson error: 0.00862675
+SDIRK error: 0.0042349
+Heun-Euler error: 0.0073012
+Number of steps done: 284
+Bogacki-Shampine error: 0.000207511
+Number of steps done: 200
+Dopri error: 4.01775e-09
+Number of steps done: 200
+Fehlberg error: 9.89504e-09
+Number of steps done: 200
+Cash-Karp error: 2.5579e-10
+Number of steps done: 200
+@endcode
+
+As expected, the higher-order methods give more accurate solutions. We see that
+the Heun-Euler method adapted the number of time steps in order to satisfy the
+tolerance. The other embedded methods did not need to change the number of time
+steps.
diff --git a/deal.II/examples/step-52/doc/tooltip b/deal.II/examples/step-52/doc/tooltip
new file mode 100644
index 0000000000..d35ea621a8
--- /dev/null
+++ b/deal.II/examples/step-52/doc/tooltip
@@ -0,0 +1 @@
+Time-dependent diffusion equation. Neutron transport
diff --git a/deal.II/examples/step-52/step-52.cc b/deal.II/examples/step-52/step-52.cc
new file mode 100644
index 0000000000..1031e49cdd
--- /dev/null
+++ b/deal.II/examples/step-52/step-52.cc
@@ -0,0 +1,629 @@
+/* ---------------------------------------------------------------------
+ * $Id: step-52.cc 30526 2013-08-29 20:06:27Z felix.gruber $
+ *
+ * Copyright (C) 2014 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Authors: Damien Lebrun-Grandie, Bruno Turcksin, 2014
+ */
+
+// @sect3{Include files}
+
+// The first task as usual is to include the functionality of these well-known
+// deal.II library files and some C++ header files.
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include