From: bangerth Date: Wed, 3 Sep 2008 12:52:45 +0000 (+0000) Subject: Add step-32, a parallelized version of step-31. X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fd7a1540edc69652c29a991f164d2e8706897696;p=dealii-svn.git Add step-32, a parallelized version of step-31. git-svn-id: https://svn.dealii.org/trunk@16727 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/examples/step-22/Makefile b/deal.II/examples/step-22/Makefile index a4436031f6..dbee705c63 100644 --- a/deal.II/examples/step-22/Makefile +++ b/deal.II/examples/step-22/Makefile @@ -14,7 +14,7 @@ target = $(basename $(shell echo step-*.cc)) # run-time checking of parameters and internal states is performed, so # you should set this value to `on' while you develop your program, # and to `off' when running production computations. -debug-mode = on +debug-mode = off # As third field, we need to give the path to the top-level deal.II diff --git a/deal.II/examples/step-22/step-22.cc b/deal.II/examples/step-22/step-22.cc index d95e491974..e65e55cfbb 100644 --- a/deal.II/examples/step-22/step-22.cc +++ b/deal.II/examples/step-22/step-22.cc @@ -221,7 +221,10 @@ BoundaryValues::value (const Point &p, ExcIndexRange (component, 0, this->n_components)); if (component == 0) - return (p[0] < 0 ? -1 : (p[0] > 0 ? 1 : 0)); + if (p[1] < 0) + return (p[0] < -0.5 ? -1 : (p[0] > -0.5 ? 1 : 0)); + else + return (p[0] < 0.5 ? -1 : (p[0] > 0.5 ? 1 : 0)); return 0; } @@ -1201,7 +1204,7 @@ StokesProblem::output_results (const unsigned int refinement_cycle) const << ".vtk"; std::ofstream output (filename.str().c_str()); - data_out.write_vtk (output); + data_out.write_gmv (output); } @@ -1272,13 +1275,14 @@ void StokesProblem::run () { std::vector subdivisions (dim, 1); subdivisions[0] = 4; + subdivisions[1] = 4; const Point bottom_left = (dim == 2 ? Point(-2,-1) : - Point(-2,0,-1)); + Point(-2,-2,-1)); const Point top_right = (dim == 2 ? Point(2,0) : - Point(2,1,0)); + Point(2,2,0)); GridGenerator::subdivided_hyper_rectangle (triangulation, subdivisions, diff --git a/deal.II/examples/step-32/Makefile b/deal.II/examples/step-32/Makefile new file mode 100644 index 0000000000..7c05bcbdb8 --- /dev/null +++ b/deal.II/examples/step-32/Makefile @@ -0,0 +1,188 @@ +# $Id: Makefile,v 1.4 2006/02/10 17:53:05 wolf Exp $ + + +# For the small projects Makefile, you basically need to fill in only +# four fields. +# +# The first is the name of the application. It is assumed that the +# application name is the same as the base file name of the single C++ +# file from which the application is generated. +target = $(basename $(shell echo step-*.cc)) + +# The second field determines whether you want to run your program in +# debug or optimized mode. The latter is significantly faster, but no +# run-time checking of parameters and internal states is performed, so +# you should set this value to `on' while you develop your program, +# and to `off' when running production computations. +debug-mode = on + + +# As third field, we need to give the path to the top-level deal.II +# directory. You need to adjust this to your needs. Since this path is +# probably the most often needed one in the Makefile internals, it is +# designated by a single-character variable, since that can be +# reference using $D only, i.e. without the parentheses that are +# required for most other parameters, as e.g. in $(target). +D = ../../ + + +# The last field specifies the names of data and other files that +# shall be deleted when calling `make clean'. 
Object and backup files, +# executables and the like are removed anyway. Here, we give a list of +# files in the various output formats that deal.II supports. +clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk + + + + +# +# +# Usually, you will not need to change anything beyond this point. +# +# +# The next statement tell the `make' program where to find the +# deal.II top level directory and to include the file with the global +# settings +include $D/common/Make.global_options + +################################################################ +# This example program will only work if Trilinos is installed. If this +# is not the case, then simply redefine the main targets to to nothing +ifneq ($(USE_CONTRIB_TRILINOS),yes) +default run clean: + @echo + @echo "===========================================================" + @echo "= This program cannot be compiled without Trilinos. Make=" + @echo "= sure you have Trilinos installed and detected during =" + @echo "= configuration of deal.II =" + @echo "===========================================================" + @echo + +else +# +################################################################ + + + + +# Since the whole project consists of only one file, we need not +# consider difficult dependencies. We only have to declare the +# libraries which we want to link to the object file, and there need +# to be two sets of libraries: one for the debug mode version of the +# application and one for the optimized mode. Here we have selected +# the versions for 2d. Note that the order in which the libraries are +# given here is important and that your applications won't link +# properly if they are given in another order. +# +# You may need to augment the lists of libraries when compiling your +# program for other dimensions, or when using third party libraries +libs.g = $(lib-deal2-2d.g) \ + $(lib-deal2-3d.g) \ + $(lib-lac.g) \ + $(lib-base.g) +libs.o = $(lib-deal2-2d.o) \ + $(lib-deal2-3d.o) \ + $(lib-lac.o) \ + $(lib-base.o) + + +# We now use the variable defined above which switch between debug and +# optimized mode to select the set of libraries to link with. Included +# in the list of libraries is the name of the object file which we +# will produce from the single C++ file. Note that by default we use +# the extension .g.o for object files compiled in debug mode and .o for +# object files in optimized mode (or whatever the local default on your +# system is instead of .o). +ifeq ($(debug-mode),on) + libraries = $(target).g.$(OBJEXT) $(libs.g) +else + libraries = $(target).$(OBJEXT) $(libs.o) +endif + + +# Now comes the first production rule: how to link the single object +# file produced from the single C++ file into the executable. Since +# this is the first rule in the Makefile, it is the one `make' selects +# if you call it without arguments. +$(target) : $(libraries) + @echo ============================ Linking $@ + @$(CXX) -o $@$(EXEEXT) $^ $(LIBS) $(LDFLAGS) + + +# To make running the application somewhat independent of the actual +# program name, we usually declare a rule `run' which simply runs the +# program. You can then run it by typing `make run'. This is also +# useful if you want to call the executable with arguments which do +# not change frequently. You may then want to add them to the +# following rule: +run: $(target) + @echo ============================ Running $< + @./$(target)$(EXEEXT) + + + +# As a last rule to the `make' program, we define what to do when +# cleaning up a directory. 
This usually involves deleting object files +# and other automatically created files such as the executable itself, +# backup files, and data files. Since the latter are not usually quite +# diverse, you needed to declare them at the top of this file. +clean: + -rm -f *.$(OBJEXT) *~ Makefile.dep $(target)$(EXEEXT) $(clean-up-files) + + +# Since we have not yet stated how to make an object file from a C++ +# file, we should do so now. Since the many flags passed to the +# compiler are usually not of much interest, we suppress the actual +# command line using the `at' sign in the first column of the rules +# and write the string indicating what we do instead. +./%.g.$(OBJEXT) : + @echo ==============debug========= $( $@ \ + || (rm -f $@ ; false) + @if test -s $@ ; then : else rm $@ ; fi + + +# To make the dependencies known to `make', we finally have to include +# them: +include Makefile.dep + + +endif # USE_CONTRIB_TRILINOS diff --git a/deal.II/examples/step-32/doc/intro.dox b/deal.II/examples/step-32/doc/intro.dox new file mode 100644 index 0000000000..33ce876a6f --- /dev/null +++ b/deal.II/examples/step-32/doc/intro.dox @@ -0,0 +1,914 @@ +
+ +This program was contributed by Martin Kronbichler and Wolfgang +Bangerth. +
+This material is based upon work partly supported by the National +Science Foundation under Award No. EAR-0426271 and The California Institute of +Technology. Any opinions, findings, and conclusions or recommendations +expressed in this publication are those of the author and do not +necessarily reflect the views of the National Science Foundation or of The +California Institute of Technology. +
+ + + +

<h1>Introduction</h1>

+ +

<h3>The Boussinesq equations</h3>

+ +This program deals with an interesting physical problem: how does a +fluid (i.e. a liquid or gas) behave if it experiences differences in +buoyancy caused by temperature differences? It is clear that those +parts of the fluid that are hotter (and therefore lighter) are going +to rise up and those that are cooler (and denser) are going to sink +down with gravity. + +In cases where the fluid moves slowly enough such that inertia effects +can be neglected, the equations that describe such behavior are the +Boussinesq equations that read as follows: +@f{eqnarray*} + -\nabla \cdot \eta \varepsilon ({\mathbf u}) + \nabla p &=& + \mathrm{Ra} \; T \mathbf{g}, + \\ + \nabla \cdot {\mathbf u} &=& 0, + \\ + \frac{\partial T}{\partial t} + + + {\mathbf u} \cdot \nabla T + - + \nabla \cdot \kappa \nabla T &=& \gamma. +@f} +These equations fall into the class of vector-valued problems (a +toplevel overview of this topic can be found in the @ref vector_valued module). +Here, u is the velocity field, p the pressure, and T +the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12 +[(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric +gradient of the velocity. As can be seen, velocity and pressure +solve a Stokes equation describing the motion of an incompressible +fluid, an equation we have previously considered in @ref step_22 "step-22"; we +will draw extensively on the experience we have gained in that program, in +particular with regard to efficient linear Stokes solvers. + +The forcing term of the fluid motion is the buoyancy of the +fluid, expressed as the product of the Rayleigh number $\mathrm{Ra}$, +the temperature T and the gravity vector g. (A possibly +more intuitive formulation would use $\mathrm{Ra} \; (T-\bar T) +\mathbf{g}$ as right hand side where $\bar T$ is the average +temperature, and the right hand side then describes the forces due to +local deviations from the average density; this formulation is +entirely equivalent if the gravity vector results from a gravity +potential $\phi$, i.e. $\mathbf{g}=-\nabla\phi$, and yields the exact +same solution except for the pressure which will now be $p+\mathrm{Ra} +\;\bar T \phi$.) + +While the first two equations describe how the fluid reacts to +temperature differences by moving around, the third equation states +how the fluid motion affects the temperature field: it is an advection +diffusion equation, i.e. the temperature is attached to the fluid +particles and advected along in the flow field, with an additional +diffusion (heat conduction) term. In many applications, the diffusion +coefficient is fairly small, and the temperature equation is in fact +transport, not diffusion dominated and therefore in character more hyperbolic +than elliptic; we will have to take this into account when developing a stable +discretization. + +In the equations above, the term $\gamma$ on the right hand side denotes the +heat sources and may be a spatially and temporally varying function. $\eta$ +and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume +constant for this tutorial program. The more general case when $\eta$ depends on +the temperature is an important factor in physical applications: Most materials +become more fluid as they get hotter (i.e., $\eta$ decreases with T); +sometimes, as in the case of rock minerals at temperatures close to their +melting point, $\eta$ may change by orders of magnitude over the typical range +of temperatures. 
+ +$\mathrm{Ra}$, called the Rayleigh +number, is a dimensionless number that describes the ratio of heat +transport due to convection induced by buoyancy changes from +temperature differences, and of heat transport due to thermal +diffusion. A small Rayleigh number implies that buoyancy is not strong +relative to viscosity and fluid motion u is slow enough so +that heat diffusion $\kappa\Delta T$ is the dominant heat transport +term. On the other hand, a fluid with a high Rayleigh number will show +vigorous convection that dominates heat conduction. + +For most fluids for which we are interested in computing thermal +convection, the Rayleigh number is very large, often $10^6$ or +larger. From the structure of the equations, we see that this will +lead to large pressure differences and large velocities. Consequently, +the convection term in the convection-diffusion equation for T will +also be very large and an accurate solution of this equation will +require us to choose small time steps. Problems with large Rayleigh +numbers are therefore hard to solve numerically for similar reasons +that make solving the Navier-Stokes +equations hard to solve when the Reynolds number +$\mathrm{Re}$ is large. + +Note that a large Rayleigh number does not necessarily involve large +velocities in absolute terms. For example, the Rayleigh number in the +earth mantle has a Rayleigh number larger than $10^6$. Yet the +velocities are small: the material is in fact solid rock but it is so +hot and under pressure that it can flow very slowly, on the order of +at most a few centimeters per year. Nevertheless, this can lead to +mixing over time scales of many million years, a time scale much +shorter than for the same amount of heat to be distributed by thermal +conductivity and a time scale of relevance to affect the evolution of the +earth's interior and surface structure. + + + +

<h3>%Boundary and initial conditions</h3>

+ +Since the Boussinesq equations are derived under the assumption that inertia +of the fluid's motion does not play a role, the flow field is at each time +entirely determined by buoyancy difference at that time, not by the flow field +at previous times. This is reflected by the fact that the first two equations +above are the steady state Stokes equation that do not contain a time +derivative. Consequently, we do not need initial conditions for either +velocities or pressure. On the other hand, the temperature field does satisfy +an equation with a time derivative, so we need initial conditions for T. + +As for boundary conditions: if $\kappa>0$ then the temperature +satisfies a second order differential equation that requires +boundary data all around the boundary for all times. These can either be a +prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary +conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla +T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary +condition, i.e. prescribe no thermal flux: $\phi=0$. + +Similarly, the velocity field requires us to pose boundary conditions. These +may be no-slip no-flux conditions u=0 on $\partial\Omega$ if the fluid +sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf +u = 0$ if the fluid can flow along but not across the boundary, or any number +of other conditions that are physically reasonable. In this program, we will +use no normal flux conditions. + + +

<h3>Solution approach</h3>

+ +Like the equations solved in @ref step_21 "step-21", we here have a +system of differential-algebraic equations (DAE): with respect to the time +variable, only the temperature equation is a differential equation +whereas the Stokes system for u and p has no +time-derivatives and is therefore of the sort of an algebraic +constraint that has to hold at each time instant. The main difference +to @ref step_21 "step-21" is that the algebraic constraint there was a +mixed Laplace system of the form +@f{eqnarray*} + \mathbf u + {\mathbf K}\lambda \nabla p &=& 0, \\ + \nabla\cdot \mathbf u &=& f, +@f} +where now we have a Stokes system +@f{eqnarray*} + -\nabla \cdot \eta \varepsilon ({\mathbf u}) + \nabla p &=& f, \\ + \nabla\cdot \mathbf u &=& 0, +@f} +where $\nabla \cdot \eta \varepsilon (\cdot)$ is an operator similar to the +Laplacian $\Delta$ applied to a vector field. + +Given the similarity to what we have done in @ref step_21 "step-21", +it may not come as a surprise that we choose a similar approach, +although we will have to make adjustments for the change in operator +in the top-left corner of the differential operator. + + +

<h4>Time stepping</h4>

+ +The structure of the problem as a DAE allows us to use the same +strategy as we have already used in @ref step_21 "step-21", i.e. we +use a time lag scheme: first solve the Stokes equations for velocity and +pressure using the temperature field from the previous time step, then +with the new velocities update the temperature field for the current +time step. In other words, in time step n we first solve the Stokes +system +@f{eqnarray*} + -\nabla \cdot \eta \varepsilon ({\mathbf u}^n) + \nabla p^n &=& + \mathrm{Ra} \; T^{n-1} \mathbf{g}, + \\ + \nabla \cdot {\mathbf u}^n &=& 0, +@f} +and then the temperature equation with the so-computed velocity field +${\mathbf u}^n$. In contrast to @ref step_21 "step-21", we'll use a +higher order time stepping scheme here, namely the Backward +Differentiation Formula scheme of order 2 (BDF-2 in short) that +replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) +difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with +k the time step size. + +This gives the discretized-in-time temperature equation +@f{eqnarray*} + \frac 32 T^n + - + k\nabla \cdot \kappa \nabla T^n + &=& + 2 T^{n-1} + - + \frac 12 T^{n-2} + - + k{\mathbf u}^n \cdot \nabla (2T^{n-1}-T^{n-2}) + + + k\gamma. +@f} +Note how the temperature equation is +solved semi-explicitly: diffusion is treated implicitly whereas +advection is treated explicitly using the just-computed velocity +field but only previously computed temperature fields. The +temperature terms appearing in the advection term are forward +projected to the current time: +$T^n \approx T^{n-1} + k_n +\frac{\partial T}{\partial t} \approx T^{n-1} + k_n +\frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection +for maintaining the order of accuracy of the BDF-2 scheme. In other words, the +temperature fields we use in the explicit right hand side are first +order approximations of the current temperature field — not +quite an explicit time stepping scheme, but by character not too far +away either. + +The introduction of the temperature extrapolation limits the time step +by a +Courant-Friedrichs-Lewy (CFL) condition just like it was in +@ref step_21 "step-21". (We wouldn't have had that stability condition if +we treated the advection term implicitly since the BDF-2 scheme is A-stable, +at the price that we needed to build a new temperature matrix at each time +step.) We will discuss the exact choice of time step in the results section, but for the moment of importance is that +this CFL condition means that the time step +size k may change from time step to time step, and that we have to +modify the above formula slightly. 
If $k_n,k_{n-1}$ are the time steps +sizes of the current and previous time step, then we use the +approximations +$\frac{\partial T}{\partial t} \approx + \frac 1{k_n} + \left( + \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^{n} + - + \frac{k_n+k_{n-1}}{k_{n-1}}T^{n-1} + + + \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2} + \right)$ +and +$T^n \approx + T^{n-1} + k_n \frac{\partial T}{\partial t} + \approx + T^{n-1} + k_n + \frac{T^{n-1}-T^{n-2}}{k_{n-1}} + = + \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2}$, +and above equation is generalized as follows: +@f{eqnarray*} + \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n + - + k_n\nabla \cdot \kappa \nabla T^n + &=& + \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1} + - + \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2} + - + k_n{\mathbf u}^n \cdot \nabla \left[ + \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2} + \right] + + + k_n\gamma. +@f} +That's not an easy to read equation, but will provide us with the +desired higher order accuracy. As a consistency check, it is easy to +verify that it reduces to the same equation as above if $k_n=k_{n-1}$. + +As a final remark we note that the choice of a higher order time +stepping scheme of course forces us to keep more time steps in memory; +in particular, we here will need to have $T^{n-2}$ around, a vector +that we could previously discard. This seems like a nuisance that we +were able to avoid previously by using only a first order time +stepping scheme, but as we will see below when discussing the topic of +stabilization, we will need this vector anyway and so keeping it +around for time discretization is essentially for free and gives us +the opportunity to use a higher order scheme. + + +
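Since these variable-step coefficients are easy to get wrong when typing them
into a program, the following small, self-contained C++ sketch (plain C++, not
part of the tutorial program; the function and variable names are made up for
illustration) computes the BDF-2 weights and the extrapolation weights for
given step sizes and verifies the consistency check just mentioned, namely
that for $k_n=k_{n-1}$ they reduce to $\frac 32, -2, \frac 12$ and $2, -1$:
@code
#include <cassert>
#include <cmath>
#include <iostream>

// Weights of the variable-step BDF-2 time derivative
//   dT/dt ~ (w0*T^n + w1*T^(n-1) + w2*T^(n-2)) / k_n
// and of the extrapolation T^n ~ e1*T^(n-1) + e2*T^(n-2),
// following the formulas in the text. All names are illustrative.
struct BDF2Weights
{
  double w0, w1, w2; // multiply T^n, T^(n-1), T^(n-2)
  double e1, e2;     // extrapolation weights for T^(n-1), T^(n-2)
};

BDF2Weights bdf2_weights(const double k_n, const double k_nminus1)
{
  BDF2Weights w;
  w.w0 = (2 * k_n + k_nminus1) / (k_n + k_nminus1);
  w.w1 = -(k_n + k_nminus1) / k_nminus1;
  w.w2 = k_n * k_n / (k_nminus1 * (k_n + k_nminus1));
  w.e1 = 1 + k_n / k_nminus1;
  w.e2 = -k_n / k_nminus1;
  return w;
}

int main()
{
  // Consistency check: for equal step sizes the weights reduce to
  // (3/2, -2, 1/2) and the extrapolation to (2, -1).
  const BDF2Weights w = bdf2_weights(0.1, 0.1);
  assert(std::abs(w.w0 - 1.5) < 1e-12);
  assert(std::abs(w.w1 + 2.0) < 1e-12);
  assert(std::abs(w.w2 - 0.5) < 1e-12);
  assert(std::abs(w.e1 - 2.0) < 1e-12);
  assert(std::abs(w.e2 + 1.0) < 1e-12);

  // Unequal steps, as they occur when the CFL condition changes the step:
  const BDF2Weights v = bdf2_weights(0.05, 0.1);
  std::cout << "w0=" << v.w0 << " w1=" << v.w1 << " w2=" << v.w2
            << " e1=" << v.e1 << " e2=" << v.e2 << std::endl;
}
@endcode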

<h4>Weak form and space discretization for the Stokes part</h4>

+ +Like solving the mixed Laplace equations, solving the Stokes equations +requires us to choose particular pairs of finite elements for +velocities and pressure variables. Because this has already been discussed in +@ref step_22 "step-22", we only cover this topic briefly: +Here, we use the +stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous +elements, so we can form the weak form of the Stokes equation without +problem by integrating by parts and substituting continuous functions +by their discrete counterparts: +@f{eqnarray*} + (\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^n_h)) + - + (\nabla \cdot {\mathbf v}_h, p^n_h) + &=& + ({\mathbf v}_h, \mathrm{Ra} \; T^{n-1}_h \mathbf{g}), + \\ + (q_h, \nabla \cdot {\mathbf u}^n_h) &=& 0, +@f} +for all test functions $\mathbf v_h, q_h$. The first term of the first +equation is considered as the inner product between tensors, i.e. +$(\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^n_h))_\Omega + = \int_\Omega \sum_{i,j=1}^d [\nabla {\mathbf v}_h]_{ij} + \eta [\varepsilon ({\mathbf u}^n_h)]_{ij}\, dx$. +Because the second tensor in this product is symmetric, the +anti-symmetric component of $\nabla {\mathbf v}_h$ plays no role and +it leads to the entirely same form if we use the symmetric gradient of +$\mathbf v_h$ instead. Consequently, the formulation we consider and +that we implement is +@f{eqnarray*} + (\varepsilon({\mathbf v}_h), \eta \varepsilon ({\mathbf u}^n_h)) + - + (\nabla \cdot {\mathbf v}_h, p^n_h) + &=& + ({\mathbf v}_h, \mathrm{Ra} \; T^{n-1}_h \mathbf{g}), + \\ + (q_h, \nabla \cdot {\mathbf u}^n_h) &=& 0. +@f} + +This is exactly the same as what we already discussed in +@ref step_22 "step-22" and there is not much more to say about this here. + + +
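As a small aside, the tensor inner product above is easily evaluated by hand
at a single quadrature point. The following plain C++ sketch (not deal.II
code; the types and names are made up for illustration) does just that and
also shows numerically why the skew-symmetric part of $\nabla {\mathbf v}_h$
drops out of the product with the symmetric tensor
$\varepsilon({\mathbf u}^n_h)$:
@code
#include <array>
#include <cstddef>
#include <iostream>

// A dim x dim tensor, e.g. a velocity gradient at one quadrature point.
// These are plain arrays, not deal.II classes; names are illustrative.
template <std::size_t dim>
using Tensor2 = std::array<std::array<double, dim>, dim>;

template <std::size_t dim>
Tensor2<dim> symmetric_gradient(const Tensor2<dim> &grad)
{
  Tensor2<dim> eps;
  for (std::size_t i = 0; i < dim; ++i)
    for (std::size_t j = 0; j < dim; ++j)
      eps[i][j] = 0.5 * (grad[i][j] + grad[j][i]);
  return eps;
}

// Full contraction a:b = sum_ij a_ij b_ij, i.e. the integrand of the
// viscous term up to the factor eta and the quadrature weight.
template <std::size_t dim>
double contract(const Tensor2<dim> &a, const Tensor2<dim> &b)
{
  double sum = 0;
  for (std::size_t i = 0; i < dim; ++i)
    for (std::size_t j = 0; j < dim; ++j)
      sum += a[i][j] * b[i][j];
  return sum;
}

int main()
{
  const Tensor2<2> grad_v = {{{1.0, 2.0}, {0.0, -1.0}}};
  const Tensor2<2> grad_u = {{{0.5, -0.5}, {1.5, -0.5}}};
  const Tensor2<2> eps_u  = symmetric_gradient(grad_u);

  // Because eps(u) is symmetric, the skew-symmetric part of grad(v) drops
  // out of the contraction: the two numbers printed below are identical.
  std::cout << contract(grad_v, eps_u) << " "
            << contract(symmetric_gradient(grad_v), eps_u) << std::endl;
}
@endcode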

<h4>Stabilization, weak form and space discretization for the temperature equation</h4>

+ +The more interesting question is what to do with the temperature +advection-diffusion equation. By default, not all discretizations of +this equation are equally stable unless we either do something like +upwinding, stabilization, or all of this. One way to achieve this is +to use discontinuous elements (i.e. the FE_DGQ class that we used, for +example, in the discretization of the transport equation in +@ref step_12 "step-12", or in discretizing the pressure in +@ref step_20 "step-20" and @ref step_21 "step-21") and to define a +flux at the interface between cells that takes into account +upwinding. If we had a pure advection problem this would probably be +the simplest way to go. However, here we have some diffusion as well, +and the discretization of the Laplace operator with discontinuous +elements is cumbersome because of the significant number of additional +terms that need to be integrated on each face between +cells. Discontinuous elements also have the drawback that the use of +numerical fluxes introduces an additional numerical diffusion that +acts everywhere, whereas we would really like to minimize the effect +of numerical diffusion to a minimum and only apply it where it is +necessary to stabilize the scheme. + +A better alternative is therefore to add some nonlinear viscosity to +the model. Essentially, what this does is to transform the temperature +equation from the form +@f{eqnarray*} + \frac{\partial T}{\partial t} + + + {\mathbf u} \cdot \nabla T + - + \nabla \cdot \kappa \nabla T &=& \gamma +@f} +to something like +@f{eqnarray*} + \frac{\partial T}{\partial t} + + + {\mathbf u} \cdot \nabla T + - + \nabla \cdot (\kappa+\nu(T)) \nabla T &=& \gamma, +@f} +where $\nu(T)$ is an addition viscosity (diffusion) term that only +acts in the vicinity of shocks and other discontinuities. $\nu(T)$ is +chosen in such a way that if T satisfies the original equations, the +additional viscosity is zero. + +To achieve this, the literature contains a number of approaches. We +will here follow one developed by Guermond and Popov that builds on a +suitably defined residual and a limiting procedure for the additional +viscosity. To this end, let us define a residual $R_\alpha(T)$ as follows: +@f{eqnarray*} + R_\alpha(T) + = + \left( + \frac{\partial T}{\partial t} + + + {\mathbf u} \cdot \nabla T + - + \nabla \cdot \kappa \nabla T - \gamma + \right) + T^{\alpha-1} +@f} +where we will later choose the stabilization exponent $\alpha$ from +within the range $[1,2]$. Note that $R_\alpha(T)$ will be zero if $T$ +satisfies the temperature equation, since then the term in parentheses +will be zero. 
Multiplying terms out, we get the following, entirely +equivalent form: +@f{eqnarray*} + R_\alpha(T) + = + \frac 1\alpha + \frac{\partial (T^\alpha)}{\partial t} + + + \frac 1\alpha + {\mathbf u} \cdot \nabla (T^\alpha) + - + \frac 1\alpha + \nabla \cdot \kappa \nabla (T^\alpha) + + + \kappa(\alpha-1) + T^{\alpha-2} |\nabla T|^\alpha + - + \gamma + T^{\alpha-1} +@f} + +With this residual, we can now define the artificial viscosity as +a piecewise constant function defined on each cell $K$ with diameter +$h_K$ separately as +follows: +@f{eqnarray*} + \nu_\alpha(T)|_K + = + \beta + \|\mathbf{u}\|_{L^\infty(K)} + \min\left\{ + h_K, + h_K^\alpha + \frac{\|R_\alpha(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)} + \right\} +@f} + +Here, $\beta$ is a stabilization constant (a dimensional analysis +reveals that it is unitless and therefore independent of scaling; we will +discuss its choice in the results section) and +$c(\mathbf{u},T)$ is a normalization constant that must have units +$\frac{m^{\alpha-1}K^\alpha}{s}$. We will choose it as +$c(\mathbf{u},T) = + c_R\ \|\mathbf{u}\|_{L^\infty(\Omega)} \ \mathrm{var}(T) + \ |\mathrm{diam}(\Omega)|^{\alpha-2}$, +where $\mathrm{var}(T)=\max_\Omega T - \min_\Omega T$ is the range of present +temperature values (remember that buoyancy is driven by temperature +variations, not the absolute temperature) and $c_R$ is a dimensionless +constant. To understand why this method works consider this: If on a particular +cell $K$ the temperature field is smooth, then we expect the residual +to be small there (in fact to be on the order of ${\cal O}(h_K)$) and +the stabilization term that injects artificial diffusion will there be +of size $h_K^{\alpha+1}$ — i.e. rather small, just as we hope it to +be when no additional diffusion is necessary. On the other hand, if we +are on or close to a discontinuity of the temperature field, then the +residual will be large; the minimum operation in the definition of +$\nu_\alpha(T)$ will then ensure that the stabilization has size $h_K$ +— the optimal amount of artificial viscosity to ensure stability of +the scheme. + +It is certainly a good questions whether this scheme really works? +Computations by Guermond and Popov have shown that this form of +stabilization actually performs much better than most of the other +stabilization schemes that are around (for example streamline +diffusion, to name only the simplest one). Furthermore, for $\alpha\in +[1,2)$ they can even prove that it produces better convergence orders +for the linear transport equation than for example streamline +diffusion. For $\alpha=2$, no theoretical results are currently +available, but numerical tests indicate that the results +are considerably better than for $\alpha=1$. + +A more practical question is how to introduce this artificial +diffusion into the equations we would like to solve. Note that the +numerical viscosity $\nu(T)$ is temperature-dependent, so the equation +we want to solve is nonlinear in T — not what one desires from a +simple method to stabilize an equation, and even less so if we realize +that $\nu(T)$ is non-differentiable in T. However, there is no +reason to despair: we still have to discretize in time and we can +treat the term explicitly. + +In the definition of the stabilization parameter, we approximate the time +derivative by $\frac{\partial T}{\partial t} \approx +\frac{T^{n-1}-T^{n-2}}{k^{n-1}}$. 
This approximation makes only use +of available time data and this is the reason why we need to store data of two +previous time steps (which enabled us to use the BDF-2 scheme without +additional storage cost). We could now simply evaluate the rest of the +terms at $t_{n-1}$, but then the discrete residual would be nothing else than +a backward Euler approximation, which is only first order accurate. So, in +case of smooth solutions, the residual would be still of the order h, +despite the second order time accuracy in the outer BDF-2 scheme and the +spatial FE discretization. This is certainly not what we want to have +(in fact, we desired to have small residuals in regions where the solution +behaves nicely), so a bit more care is needed. The key to this problem +is to observe that the first derivative as we constructed it is actually +centered at $t_{n-\frac{3}{2}}$. We get the desired second order accurate +residual calculation if we evaluate all spatial terms at $t_{n-\frac{3}{2}}$ +by using the approximation $\frac 12 T^{n-1}+\frac 12 T^{n-2}$, which means +that we calculate the nonlinear viscosity as a function of this +intermediate temperature, $\nu_\alpha = +\nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right)$. Note that this +evaluation of the residual is nothing else than a Crank-Nicholson scheme, +so we can be sure that now everything is alright. One might wonder whether +it is a problem that the numerical viscosity now is not evaluated at +time n (as opposed to the rest of the equation). However, this offset +is uncritical: For smooth solutions, $\nu_\alpha$ will vary continuously, +so the error in time offset is k times smaller than the nonlinear +viscosity itself, i.e., it is a small higher order contribution that is +left out. That's fine because the term itself is already at the level of +discretization error in smooth regions. + +Using the BDF-2 scheme introduced above, +this yields for the simpler case of uniform time steps of size k: +@f{eqnarray*} + \frac 32 T^n + - + k\nabla \cdot \kappa \nabla T^n + &=& + 2 T^{n-1} + - + \frac 12 T^{n-2} + \\ + && + + + k\nabla \cdot + \left[ + \nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right) + \ \nabla (2T^{n-1}-T^{n-2}) + \right] + \\ + && + - + k{\mathbf u}^n \cdot \nabla (2T^{n-1}-T^{n-2}) + \\ + && + + + k\gamma. +@f} +On the left side of this equation remains the term from the time +derivative and the original (physical) diffusion which we treat +implicitly (this is actually a nice term: the matrices that result +from the left hand side are the mass matrix and a multiple of the +Laplace matrix — both are positive definite and if the time step +size k is small, the sum is simple to invert). On the right hand +side, the terms in the first line result from the time derivative; in +the second line is the artificial diffusion at time $t_{n-\frac +32}$; the third line contains the +advection term, and the fourth the sources. Note that the +artificial diffusion operates on the extrapolated +temperature at the current time in the same way as we have discussed +the advection works in the section on time stepping. 
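To summarize the various pieces, here is a small plain C++ sketch (not the
code of the tutorial program; the function name and the numbers in
<code>main()</code> are made up for illustration) of how the cell-wise
viscosity $\nu_\alpha(T)|_K$ could be computed once the assembly loop has the
necessary norms at hand, with the residual understood to be evaluated at
$t_{n-\frac 32}$ as just discussed:
@code
#include <algorithm>
#include <cmath>
#include <iostream>

// Cell-wise stabilization viscosity nu_alpha(T)|_K following the formulas
// above. All arguments are quantities the assembly loop would provide;
// the function and variable names are purely illustrative.
double compute_viscosity(const double max_velocity_K,     // ||u||_Linf(K)
                         const double max_residual_K,     // ||R_alpha(T)||_Linf(K)
                         const double cell_diameter,      // h_K
                         const double global_max_velocity,// ||u||_Linf(Omega)
                         const double temperature_range,  // var(T)
                         const double domain_diameter,    // diam(Omega)
                         const double alpha,
                         const double beta,
                         const double c_R)
{
  // Normalization c(u,T) = c_R ||u||_inf(Omega) var(T) diam(Omega)^(alpha-2):
  const double c = c_R * global_max_velocity * temperature_range *
                   std::pow(domain_diameter, alpha - 2.);

  // In the very first steps velocity and temperature variation may still be
  // zero; fall back to the first argument of the minimum in that case.
  if (c == 0.)
    return beta * max_velocity_K * cell_diameter;

  return beta * max_velocity_K *
         std::min(cell_diameter,
                  std::pow(cell_diameter, alpha) * max_residual_K / c);
}

int main()
{
  // Illustrative numbers only: alpha=2, beta=0.03 (cf. the results section),
  // and an arbitrary c_R=0.1.
  const double alpha = 2., beta = 0.03, c_R = 0.1;

  // Smooth cell: small residual, so the viscosity is much smaller than h_K...
  std::cout << compute_viscosity(1., 1e-3, 0.05, 1., 1., 2., alpha, beta, c_R)
            << std::endl;
  // ...near a sharp temperature front the minimum switches to the h_K branch:
  std::cout << compute_viscosity(1., 1e+3, 0.05, 1., 1., 2., alpha, beta, c_R)
            << std::endl;
}
@endcode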
+ +The form for non-uniform time steps that we will have to use in +reality is a bit more complicated (which is why we showed the simpler +form above first) and reads: +@f{eqnarray*} + \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n + - + k_n\nabla \cdot \kappa \nabla T^n + &=& + \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1} + - + \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2} + \\ + && + + + k_n\nabla \cdot + \left[ + \nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right) + \ \nabla \left[ + \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2} + \right] + \right] + \\ + && + - + k_n{\mathbf u}^n \cdot \nabla \left[ + \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2} + \right] + \\ + && + + + k_n\gamma. +@f} + +After settling all these issues, the weak form follows naturally from +the strong form shown in the last equation, and we immediately arrive +at the weak form of the discretized equations: +@f{eqnarray*} + \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} (\tau_h,T_h^n) + + + k_n (\nabla \tau_h, \kappa \nabla T_h^n) + &=& + \biggl(\tau_h, + \frac{k_n+k_{n-1}}{k_{n-1}} T_h^{n-1} + - + \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T_h^{n-2} + \\ + &&\qquad\qquad + - + k_n{\mathbf u}_h^n \cdot \nabla \left[ + \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2} + \right] + + + k_n\gamma \biggr) + \\ + && + - + k_n \left(\nabla \tau_h, + \nu_\alpha\left(\frac 12 T_h^{n-1}+\frac 12 T_h^{n-2}\right) + \ \nabla \left[ + \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2} + \right] + \right) +@f} +for all discrete test functions $\tau_h$. Here, the diffusion term has been +integrated by parts, and we have used that we will impose no thermal flux, +$\mathbf{n}\cdot\kappa\nabla T|_{\partial\Omega}=0$. + +This then results in a +matrix equation of form +@f{eqnarray*} + \left( \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} M+k_n A_T\right) T_h^n = F(U_h^n,T_h^{n-1},T_h^{n-2}), +@f} +which given the structure of matrix on the left (the sum of two +positive definite matrices) is easily solved using the Conjugate +Gradient method. + + + +

<h4>Linear solvers</h4>

+ +As explained above, our approach to solving the joint system for +velocities/pressure on the one hand and temperature on the other is to use an +operator splitting where we first solve the Stokes system for the velocities +and pressures using the old temperature field, and then solve for the new +temperature field using the just computed velocity field. + + +
<h5>Linear solvers for the Stokes problem</h5>
+ +Solving the linear equations coming from the Stokes system has been +discussed in great detail in @ref step_22 "step-22". In particular, in +the results section of that program, we have discussed a number of +alternative linear solver strategies that turned out to be more +efficient than the original approach. The best alternative +identified there we to use a GMRES solver preconditioned by a block +matrix involving the Schur complement. Specifically, the Stokes +operator leads to a block structured matrix +@f{eqnarray*} + \left(\begin{array}{cc} + A & B^T \\ B & 0 + \end{array}\right) +@f} +and as discussed there a good preconditioner is +@f{eqnarray*} + P^{-1} + = + \left(\begin{array}{cc} + A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1} + \end{array}\right) +@f} +where S is the Schur complement of the Stokes operator +$S=B^TA^{-1}B$. Of course, this preconditioner is not useful because we +can't form the various inverses of matrices, but we can use the +following as a preconditioner: +@f{eqnarray*} + \tilde P^{-1} + = + \left(\begin{array}{cc} + \tilde A^{-1} & 0 \\ \tilde S^{-1} B \tilde A^{-1} & -\tilde S^{-1} + \end{array}\right) +@f} +where $\tilde A^{-1},\tilde S^{-1}$ are approximations to the inverse +matrices. In particular, it turned out that S is spectrally +equivalent to the mass matrix and consequently replacing $\tilde +S^{-1}$ by a CG solver applied to the mass matrix on the pressure +space was a good choice. + +It was more complicated to come up with a good replacement $\tilde +A^{-1}$, which corresponds to the discretized symmetric Laplacian of +the vector-valued velocity field, i.e. +$A_{ij} = (\varepsilon {\mathbf v}_i, \eta \varepsilon ({\mathbf +v}_j))$. +In @ref step_22 "step-22" we used a sparse LU decomposition (using the +SparseDirectUMFPACK class) of A for $\tilde A^{-1}$ — the +perfect preconditioner — in 2d, but for 3d memory and compute +time is not usually sufficient to actually compute this decomposition; +consequently, we only use an incomplete LU decomposition (ILU, using +the SparseILU class) in 3d. + +For this program, we would like to go a bit further. To this end, note +that the symmetrized bilinear form on vector fields, +$(\varepsilon {\mathbf v}_i, \eta \varepsilon ({\mathbf v}_j))$ +is not too far away from the nonsymmetrized version, +$(\nabla {\mathbf v}_i, \eta \nabla {\mathbf v}_j) += \sum_{k,l=1}^d + (\partial_k ({\mathbf v}_i)_l, \eta \partial_k ({\mathbf v}_j)_l) +$. The latter, +however, has the advantage that the dim vector components +of the test functions are not coupled (well, almost, see below), +i.e. the resulting matrix is block-diagonal: one block for each vector +component, and each of these blocks is equal to the Laplace matrix for +this vector component. So assuming we order degrees of freedom in such +a way that first all x-components of the velocity are numbered, then +the y-components, and then the z-components, then the matrix +$\hat A$ that is associated with this slightly different bilinear form has +the form +@f{eqnarray*} + \hat A = + \left(\begin{array}{ccc} + A_s & 0 & 0 \\ 0 & A_s & 0 \\ 0 & 0 & A_s + \end{array}\right) +@f} +where $A_s$ is a Laplace matrix of size equal to the number of shape functions +associated with each component of the vector-valued velocity. 
With this +matrix, one could be tempted to define our preconditioner for the +velocity matrix A as follows: +@f{eqnarray*} + \tilde A^{-1} = + \left(\begin{array}{ccc} + \tilde A_s^{-1} & 0 & 0 \\ + 0 & \tilde A_s^{-1} & 0 \\ + 0 & 0 & \tilde A_s^{-1} + \end{array}\right), +@f} +where $\tilde A_s^{-1}$ is a preconditioner for the Laplace matrix — +something where we know very well how to build good preconditioners! + +In reality, the story is not quite as simple: To make the matrix +$\tilde A$ definite, we need to make the individual blocks $\tilde +A_s$ definite by applying boundary conditions. One can try to do so by +applying Dirichlet boundary conditions all around the boundary, and +then the so-defined preconditioner $\tilde A^{-1}$ turns out to be a +good preconditioner for A if the latter matrix results from a Stokes +problem where we also have Dirichlet boundary conditions on the +velocity components all around the domain, i.e. if we enforce u=0. + +Unfortunately, this "if" is an "if and only if": in the program below +we will want to use no-flux boundary conditions of the form $\mathbf u +\cdot \mathbf n = 0$ (i.e. flow parallel to the boundary is allowed, +but no flux through the boundary). In this case, it turns out that the +block diagonal matrix defined above is not a good preconditioner +because it neglects the coupling of components at the boundary. A +better way to do things is therefore if we build the matrix $\hat A$ +as the vector Laplace matrix $\hat A_{ij} = (\nabla {\mathbf v}_i, +\eta \nabla {\mathbf v}_j)$ and then apply the same boundary condition +as we applied to A. If this is Dirichlet boundary conditions all +around the domain, the $\hat A$ will decouple to three diagonal blocks +as above, and if the boundary conditions are of the form $\mathbf u +\cdot \mathbf n = 0$ then this will introduce a coupling of degrees of +freedom at the boundary but only there. This, in fact, turns out to be +a much better preconditioner than the one introduced above, and has +almost all the benefits of what we hoped to get. + + +To sum this whole story up, we can observe: +
    +
  • Compared to building a preconditioner from the original matrix A + resulting from the symmetric gradient as we did in @ref step_22 "step-22", + we have to expect that the preconditioner based on the Laplace bilinear form + performs worse since it does not take into account the coupling between + vector components. + +
  • On the other hand, preconditioners for the Laplace matrix are typically + more mature and perform better than ones for vector problems. For example, + at the time of this writing, Algebraic Multigrid (AMG) algorithms are very + well developed for scalar problems, but not so for vector problems. + +
  • In building this preconditioner, we will have to build up the + matrix $\hat A$ and its preconditioner. While this means that we + have to store an additional matrix we didn't need before, the + preconditioner $\tilde A_s^{-1}$ is likely going to need much less + memory than storing a preconditioner for the coupled matrix + A. This is because the matrix $A_s$ has only a third of the + entries per row for all rows corresponding to interior degrees of + freedom, and contains coupling between vector components only on + those parts of the boundary where the boundary conditions introduce + such a coupling. Storing the matrix is therefore comparatively + cheap, and we can expect that computing and storing the + preconditioner $\tilde A_s$ will also be much cheaper compared to + doing so for the fully coupled matrix. +
+ + + +
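To illustrate what applying $\tilde P^{-1}$ actually amounts to inside the
GMRES iteration, here is a minimal plain C++ sketch, with the Schur complement
$S=BA^{-1}B^T$ of the velocity block replaced by some approximation $\tilde S$.
The callables and the toy numbers are purely illustrative and merely stand in
for the preconditioner and solver objects the program really uses:
@code
#include <functional>
#include <iostream>
#include <vector>

using Vec = std::vector<double>;
using Op  = std::function<Vec(const Vec &)>;

// Action of the block-triangular preconditioner
//   P^{-1} = [ A~^{-1}             0       ]
//            [ S~^{-1} B A~^{-1}  -S~^{-1} ]
// on a residual (r_u, r_p). The operators are passed as callables so that
// A~^{-1} could be an ILU/AMG sweep and S~^{-1} a CG solve with the pressure
// mass matrix, as described in the text. Names are illustrative.
void apply_block_preconditioner(const Op &apply_Atilde_inv,
                                const Op &apply_Stilde_inv,
                                const Op &apply_B,
                                const Vec &r_u, const Vec &r_p,
                                Vec &y_u, Vec &y_p)
{
  y_u = apply_Atilde_inv(r_u);          // velocity block first
  Vec tmp = apply_B(y_u);               // B A~^{-1} r_u
  for (std::size_t i = 0; i < tmp.size(); ++i)
    tmp[i] -= r_p[i];                   // B A~^{-1} r_u - r_p
  y_p = apply_Stilde_inv(tmp);          // multiply by S~^{-1}
}

int main()
{
  // A toy example with 2 velocity and 1 pressure unknowns,
  // A~ = diag(2,4), S~ = (3), B = [1 1]:
  const Op A_inv = [](const Vec &v) { return Vec{v[0] / 2.0, v[1] / 4.0}; };
  const Op S_inv = [](const Vec &v) { return Vec{v[0] / 3.0}; };
  const Op B     = [](const Vec &v) { return Vec{v[0] + v[1]}; };

  Vec y_u, y_p;
  apply_block_preconditioner(A_inv, S_inv, B, Vec{2.0, 4.0}, Vec{1.0},
                             y_u, y_p);
  std::cout << y_u[0] << ' ' << y_u[1] << ' ' << y_p[0] << std::endl; // 1 1 1/3
}
@endcode
Note that one application of the preconditioner costs one approximate velocity
solve, one application of $B$, and one approximate pressure mass matrix solve:
exactly the building blocks discussed above.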
<h5>Linear solvers for the temperature equation</h5>
+ +This is the easy part: The matrix for the temperature equation has the form +$\alpha M + \beta A$, where $M,A$ are mass and stiffness matrices on the +temperature space, and $\alpha,\beta$ are constants related the time stepping +scheme and the current and previous time step. This being the sum of a +symmetric positive definite and a symmetric positive semidefinite matrix, the +result is also symmetric positive definite. Furthermore, $\frac\beta\alpha$ is +a number proportional to the time step, and so becomes small whenever the mesh +is fine, damping the effect of the then ill-conditioned stiffness matrix. + +As a consequence, inverting this matrix with the Conjugate Gradient algorithm, +using a simple preconditioner, is trivial and very cheap compared to inverting +the Stokes matrix. + + + +
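For illustration, a bare-bones conjugate gradient iteration that only needs
the action of the matrix is sketched below (plain C++, with a toy operator
standing in for $\alpha M + k_n A_T$; the tutorial program itself of course
uses the library's solver and preconditioner classes):
@code
#include <cmath>
#include <iostream>
#include <vector>

using Vec = std::vector<double>;

// Unpreconditioned conjugate gradients for a symmetric positive definite
// matrix given through its action; a sketch of what solving
// (alpha M + k A_T) T^n = F amounts to. Names are illustrative only.
template <typename MatrixAction>
Vec cg_solve(const MatrixAction &A, const Vec &b, const double tol = 1e-12)
{
  Vec x(b.size(), 0.0), r = b, p = r;
  double rr = 0;
  for (double v : r) rr += v * v;
  for (unsigned int it = 0; it < 10 * b.size() && std::sqrt(rr) > tol; ++it)
    {
      const Vec Ap = A(p);
      double pAp = 0;
      for (std::size_t i = 0; i < p.size(); ++i) pAp += p[i] * Ap[i];
      const double alpha = rr / pAp;
      double rr_new = 0;
      for (std::size_t i = 0; i < x.size(); ++i)
        {
          x[i] += alpha * p[i];
          r[i] -= alpha * Ap[i];
          rr_new += r[i] * r[i];
        }
      for (std::size_t i = 0; i < p.size(); ++i)
        p[i] = r[i] + (rr_new / rr) * p[i];
      rr = rr_new;
    }
  return x;
}

int main()
{
  // Toy operator alpha*M + k*A with M the identity and A the 1d three-point
  // stiffness stencil, just to exercise the solver:
  const double alpha = 1.5, k = 0.01;
  const auto op = [=](const Vec &v) {
    Vec w(v.size());
    for (std::size_t i = 0; i < v.size(); ++i)
      {
        const double left  = (i > 0) ? v[i - 1] : 0.0;
        const double right = (i + 1 < v.size()) ? v[i + 1] : 0.0;
        w[i] = alpha * v[i] + k * (2.0 * v[i] - left - right);
      }
    return w;
  };
  const Vec rhs(50, 1.0);
  const Vec T = cg_solve(op, rhs);
  std::cout << "T[25] = " << T[25] << std::endl;
}
@endcode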

<h3>Implementation details</h3>

+ +One of the things worth explaining up front about the program below is the use +of two different DoFHandler objects. If one looks at the structure of the +equations above and the scheme for their solution, one realizes that there is +little commonality that keeps the Stokes part and the temperature part +together. In all previous tutorial programs in which we have discussed @ref +vector_valued "vector-valued problems" we have always only used a single +finite element with several vector components, and a single DoFHandler object. +Sometimes, we have substructured the resulting matrix into blocks to +facilitate particular solver schemes; this was, for example, the case in the +@ref step_22 "step-22" program for the Stokes equations upon which the current +program is based. + +We could of course do the same here. The linear system that we would get would +look like this: +@f{eqnarray*} + \left(\begin{array}{ccc} + A & B^T & 0 \\ B & 0 &0 \\ C & 0 & K + \end{array}\right) + \left(\begin{array}{ccc} + U^n \\ P^n \\ T^n + \end{array}\right) + = + \left(\begin{array}{ccc} + F_U(T^{n-1}) \\ 0 \\ F_T(U^n,T^{n-1},T^{n-1}) + \end{array}\right). +@f} +The problem with this is: We never use the whole matrix at the same time. In +fact, it never really exists at the same time: As explained above, $K$ and +$F_T$ depend on the already computed solution $U^n$, in the first case through +the time step (that depends on $U^n$ because it has to satisfy a CFL +condition). So we can only assemble it once we've already solved the top left +$2\times 2$ block Stokes system, and once we've moved on to the temperature +equation we don't need the Stokes part any more. Furthermore, we don't +actually build the matrix $C$: Because by the time we get to the temperature +equation we already know $U^n$, and because we have to assemble the right hand +side $F_T$ at this time anyway, we simply move the term $CU^n$ to the right +hand side and assemble it along with all the other terms there. What this +means is that there does not remain a part of the matrix where temperature +variables and Stokes variables couple, and so a global enumeration of all +degrees of freedom is no longer important: It is enough if we have an +enumeration of all Stokes degrees of freedom, and of all temperature degrees +of freedom independently. + +In essence, there is consequently not much use in putting everything +into a block matrix (though there are of course the same good reasons to do so +for the $2\times 2$ Stokes part), or, for that matter, in putting everything +into the same DoFHandler object. + +But are there downsides to doing so? These exist, though they may not +be obvious at first. The main problem is that if we need to create one global +finite element that contains velocity, pressure, and temperature shape +functions, and use this to initialize the DoFHandler. But we also use this +finite element object to initialize all FEValues or FEFaceValues objects that +we use. This may not appear to be that big a deal, but imagine what happens +when, for example, we evaluate the residual +$ + R_\alpha(T) + = + \left( + \frac{\partial T}{\partial t} + + + {\mathbf u} \cdot \nabla T + - + \nabla \cdot \kappa \nabla T - \gamma + \right) + T^{\alpha-1} +$ +that we need to compute the artificial viscosity $\nu_\alpha(T)|_K$. For +this, we need the Laplacian of the temperature, which we compute using the +tensor of second derivatives (Hessians) of the shape functions (we have to +give the update_hessians flag to the FEValues object for +this). 
Now, if we have a finite element that contains the shape functions for
velocities, pressures, and temperatures, that means that we have to compute
the Hessians of all shape functions, including the many higher order
shape functions for the velocities. That's a lot of computations that we don't
need, and indeed if one were to do that (as we had in an early version of the
program), assembling the right hand side took about a quarter of the overall
compute time.

So what we will do is to use two different finite element objects, one for the
Stokes components and one for the temperatures. With this come two different
DoFHandlers, two sparsity patterns and two matrices for the Stokes and
temperature parts, etc. And whenever we have to assemble something that
contains both temperature and Stokes shape functions (in particular the right
hand sides of Stokes and temperature equations), then we use two FEValues
objects initialized with two cell iterators that we walk in parallel through
the two DoFHandler objects associated with the same Triangulation object; for
these two FEValues objects, we use of course the same quadrature objects so
that we can iterate over the same set of quadrature points, but each FEValues
object will get update flags only according to what it actually needs to
compute. In particular, when we compute the residual as above, we only ask for
the values of the Stokes shape functions, but for the Hessians of the
temperature shape functions — much cheaper indeed, and as it turns out:
assembling the right hand side of the temperature equation is now a component
of the program that is hardly measurable.

With these changes, timing the program yields that only the following
operations are relevant for the overall run time:
    +
  • Solving the Stokes system: 72% of the run time. +
  • Assembling the Stokes preconditioner and computing the algebraic + multigrid hierarchy using the Trilinos ML package: 11% of the + run time. +
  • The function BoussinesqFlowProblem::setup_dofs: 7% + of overall run time. +
  • Assembling the Stokes and temperature right hand side vectors as + well as assembling the matrices: 7%. +
+In essence this means that all bottlenecks apart from the algebraic +multigrid have been removed. diff --git a/deal.II/examples/step-32/doc/results.dox b/deal.II/examples/step-32/doc/results.dox new file mode 100644 index 0000000000..6a2f055b1a --- /dev/null +++ b/deal.II/examples/step-32/doc/results.dox @@ -0,0 +1,275 @@ +

<h1>Results</h1>

+ + + +

<h3>Numerical experiments to determine optimal parameters</h3>

+ +The program as is has three parameters that we don't have much of a +theoretical handle on how to choose in an optimal way. These are: +
    +
  • The time step must satisfy a CFL condition + $k\le \min_K \frac{c_kh_K}{\|\mathbf{u}\|_{L^\infty(K)}}$. Here, $c_k$ is + dimensionless, but what is the right value? +
  • In the computation of the artificial viscosity, +@f{eqnarray*} + \nu_\alpha(T)|_K + = + \beta + \|\mathbf{u}\|_{L^\infty(K)} + \min\left\{ + h_K, + h_K^\alpha + \frac{\|R_\alpha(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)} + \right\}, +@f} + with $c(\mathbf{u},T) = + c_R\ \|\mathbf{u}\|_{L^\infty(\Omega)} \ \mathrm{var}(T) + \ |\mathrm{diam}(\Omega)|^{\alpha-2}$. + Here, the choice of the dimensionless numbers $\beta,c_R$ is of + interest. +
+In all of these cases, we will have to expect that the correct choice of each +value depends on that of the others, and most likely also on the space +dimension and polynomial degree of the finite element used for the +temperature. Below we'll discuss a few numerical experiments to choose +constants. + + +

<h4>Choosing c<sub>k</sub> and β</h4>

+ +These two constants are definitely linked in some way. The reason is easy to +see: In the case of a pure advection problem, +$\frac{\partial T}{\partial t} + \mathbf{u}\cdot\nabla T = \gamma$, any +explicit scheme has to satisfy a CFL condition of the form +$k\le \min_K \frac{c_k^a h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$. On the other hand, +for a pure diffusion problem, +$\frac{\partial T}{\partial t} + \nu \Delta T = \gamma$, +explicit schemes need to satisfy a condition +$k\le \min_K \frac{c_k^d h_K^2}{\nu}$. So given the form of $\nu$ above, an +advection diffusion problem like the one we have to solve here will result in +a condition of the form +$ +k\le \min_K \min \left\{ + \frac{c_k^a h_K}{\|\mathbf{u}\|_{L^\infty(K)}}, + \frac{c_k^d h_K^2}{\beta \|mathbf{u}\|_{L^\infty(K)} h_K}\right\} + = + \min_K \left( \min \left\{ + c_k^a, + \frac{c_k^d}{\beta}\right\} + \frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}} \right) +$. +It follows that we have to face the fact that we might want to choose $\beta$ +larger to improve the stability of the numerical scheme (by increasing the +amount of artificial diffusion), but we have to pay a price in the form of +smaller, and consequently more time steps. In practice, one would therefore +like to choose $\beta$ as small as possible to keep the transport problem +sufficiently stabilized while at the same time trying to choose the time step +as large as possible to reduce the overall amount of work. + +The find the right balance, the only way is to do a few computational +experiments. Here's what we did: We modified the program slightly to allow +less mesh refinement (so we don't always have to wait that long) and to choose +$ + \nu(T)|_K + = + \beta + \|\mathbf{u}\|_{L^\infty(K)} h_K +$ to eliminate the effect of of the constant $c_R$. We then run the program +for different values $c_k,\beta$ and observe maximal and minimal temperatures +in the domain. What we expect to see is this: If we choose the time step too +big (i.e. choose a $c_k$ bigger than theoretically allowed) then we will get +exponential growth of the temperature. If we choose $\beta$ too small, then +the transport stabilization becomes insufficient and the solution will show +significant oscillations but not exponential growth. + + +
<h5>Results for Q1 elements</h5>
+ +Here is what we get for +$\beta=0.01, \beta=0.1$, and $\beta=0.5$, different choices of $c_k$, and +bilinear elements (temperature_degree=1) in 2d: + + + + + + + + + + + +
+ @image html "step-33.timestep.q1.beta=0.01.png" "" width=4cm + + @image html "step-33.timestep.q1.beta=0.03.png" "" width=4cm +
+ @image html "step-33.timestep.q1.beta=0.1.png" "" width=4cm + + @image html "step-33.timestep.q1.beta=0.5.png" "" width=4cm +
+ +The way to interpret these graphs goes like this: for $\beta=0.01$ and +$c_k=\frac 12,\frac 14$, we see exponential growth or at least large +variations, but if we choose +$k=\frac 18\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ +or smaller, then the scheme is +stable though a bit wobbly. For more artificial diffusion, we can choose +$k=\frac 14\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ +or smaller for $\beta=0.03$, +$k=\frac 13\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ +or smaller for $\beta=0.1$, and again need +$k=\frac 1{15}\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ +for $\beta=0.5$ (this time because much diffusion requires a small time +step). + +So how to choose? If we were simply interested in a large time step, then we +would go with $\beta=0.1$ and +$k=\frac 13\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$. +On the other hand, we're also interested in accuracy and here it may be of +interest to actually investigate what these curves show. To this end note that +we start with a zero temperature and that our sources are positive — so +we would intuitively expect that the temperature can never drop below +zero. But it does, a consequence of Gibb's phenomenon when using continuous +elements to approximate a discontinuous solution. We can therefore see that +choosing $\beta$ too small is bad: too little artificial diffusion leads to +over- and undershoots that aren't diffused away. On the other hand, for large +$\beta$, the minimum temperature drops below zero at the beginning but then +quickly diffuses back to zero. + +On the other hand, let's also look at the maximum temperature. Watching the +movie of the solution, we see that initially the fluid is at rest. The source +keeps heating the same volume of fluid whose temperature increases linearly at +the beginning until its buoyancy is able to move it upwards. The hottest part +of the fluid is therefore transported away from the solution and fluid taking +its place is heated for only a short time before being moved out of the source +region, therefore remaining cooler than the initial bubble. If $\kappa=0$ +(in the program it is nonzero but very small) then the hottest part of the +fluid should be advected along with the flow with its temperature +constant. That's what we can see in the graphs with the smallest $\beta$: Once +the maximum temperature is reached, it hardly changes any more. On the other +hand, the larger the artificial diffusion, the more the hot spot is +diffused. Note that for this criterion, the time step size does not play a +significant role. + +So to sum up, likely the best choice would appear to be $\beta=0.03$ +and $k=\frac 14\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$. The curve is +a bit wobbly, but overall pictures looks pretty reasonable with the +exception of some over and undershoots close to the start time due to +Gibb's phenomenon. + + +
<h5>Results for Q2 elements</h5>
+ +One can repeat the same sequence of experiments for higher order +elements as well. Here are the graphs for bi-quadratic shape functions +(temperature_degree=2) for the temperature, while we +retain the $Q_2/Q_1$ stable Taylor-Hood element for the Stokes system: + + + + + + + + + + +
+ @image html "step-33.timestep.q2.beta=0.01.png" "" width=4cm + + @image html "step-33.timestep.q2.beta=0.03.png" "" width=4cm +
+ @image html "step-33.timestep.q2.beta=0.1.png" "" width=4cm +
+ +Again, small values of $\beta$ lead to less diffusion but we have to +choose the time step very small to keep things under control. Too +large values of $\beta$ make for more diffusion, but again require +small time steps. The best value would appear to be $\beta=0.03$, as +for the $Q_1$ element, and the we have to choose +$k=\frac 18\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ — exactly +half the size for the $Q_1$ element, a fact that may not be surprising +if we state the CFL condition as the requirement that the time step be +small enough so that the distance transport advects in each time step +is no longer than one grid point away (which for $Q_1$ elements +is $h_K$, but for $Q_2$ elements is $h_K/2$). + + +
<h5>Results for 3d</h5>
+ +One can repeat these experiments in 3d and find the optimal time step +for each value of $\beta$ and find the best value of $\beta$. What one +finds is that for the same $\beta$ already used in 2d, the time steps +needs to be a bit small, by around a factor of 1.2 or so. This is +easily explained: the time step restriction is +$k=\min_K \frac{ch_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ where $h_K$ is +the diameter of the cell. However, what is really needed is the +distance between mesh points, which is $\frac{h_K}{\sqrt{d}}$. So a +more appropriate form would be +$k=\min_K \frac{ch_K}{\|\mathbf{u}\|_{L^\infty(K)}\sqrt{d}}$. + +The second find is that one needs to choose $\beta$ slightly bigger +(about $\beta=0.05$ or so). This then again reduces the time step we +can take. + + + + +
Conclusions
+

To conclude, $\beta=0.03$ appears to be a good choice for the
stabilization parameter in 2d, and $\beta=0.05$ in 3d. In a
dimension-independent way, we can model this as $\beta=0.015d$. As we have
seen in the sections above, in 2d
$k=\frac 14 \frac 1{q_T}\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$
is an appropriate time step, where $q_T$ is the polynomial degree of
the temperature shape functions (in the program, this corresponds to
the variable temperature_degree). To reconcile this with
the findings in 3d for the same $\beta$, we could write this as
$k=\frac 1{2\sqrt{2}\sqrt{d}} \frac
1{q_T}\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$,
but this does not take into account that we also have to increase
$\beta$ in 3d. The final form that takes all these factors into account
reads as follows:
@f{eqnarray*}
  k =
  \frac 1{2\sqrt{2}} \frac 1{\sqrt{d}}
  \frac 2d
  \frac 1{q_T}
  \frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}
  =
  \frac 1{d\sqrt{2}\sqrt{d}}
  \frac 1{q_T}
  \frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}.
@f}
In the first form (in the center of the equation), $\frac
1{2\sqrt{2}}$ is a universal constant, $\frac 1{\sqrt{d}}$
is the factor that accounts for the difference between cell diameter
and grid point separation,
$\frac 2d$ accounts for the increase in $\beta$ with space dimension,
$\frac 1{q_T}$ accounts for the distance between grid points for
higher order elements, and $\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$
accounts for the local speed of transport relative to the cell size. This
is the formula that we use in the program; a short code sketch of it
follows at the end of this section.

As for the question of whether to use $Q_1$ or $Q_2$ elements for the
temperature, the following considerations may be useful: First,
solving the temperature equation is hardly a factor in the overall
scheme, since almost the entire compute time goes into solving the
Stokes system in each time step. Higher order elements for the
temperature equation are therefore not a significant drawback. On the
other hand, if one compares the size of the over- and undershoots the
solution produces due to the discontinuous source description, one
notices that for the choice of $\beta$ and $k$ as above, the $Q_1$
solution dips down to around $-0.47$, whereas the $Q_2$ solution only
goes to $-0.13$ (remember that the exact solution should never become
negative at all). This means that the $Q_2$ solution is significantly
more accurate; the program therefore uses these higher order elements,
despite the penalty we pay in terms of smaller time steps.
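
As announced above, here is a minimal, self-contained sketch of how this
time step formula translates into code. The function name and arguments are
ours for illustration only; in the actual program the equivalent computation
is done inside BoussinesqFlowProblem::solve(), with the cell diameter
obtained from GridTools::minimal_cell_diameter() and the velocity from
get_maximal_velocity(), bounded away from zero so that the very first time
step (where the fluid is still at rest) remains finite:
@code
#include <algorithm>
#include <cmath>

// k = 1/(d*sqrt(2)*sqrt(d)) * 1/q_T * h_K / ||u||_{L^infty},
// with the velocity bounded from below to survive a fluid at rest.
double compute_time_step (const double       minimal_cell_diameter,
                          const double       maximal_velocity,
                          const unsigned int dim,
                          const unsigned int temperature_degree)
{
  return 1. / (std::sqrt(2.) * dim * std::sqrt(1. * dim))
         / temperature_degree
         * minimal_cell_diameter
         / std::max (maximal_velocity, 0.01);
}
@endcode
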

Possible extensions

+ +Parallelization -> step-33 + diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.01.png b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.01.png new file mode 100644 index 0000000000..2eae67febb Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.01.png differ diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.03.png b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.03.png new file mode 100644 index 0000000000..7bda4fa169 Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.03.png differ diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.1.png b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.1.png new file mode 100644 index 0000000000..702bcf99bc Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.1.png differ diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.5.png b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.5.png new file mode 100644 index 0000000000..1cff898c42 Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q1.beta=0.5.png differ diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.01.png b/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.01.png new file mode 100644 index 0000000000..4e58fcd2a6 Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.01.png differ diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.03.png b/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.03.png new file mode 100644 index 0000000000..9517f80936 Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.03.png differ diff --git a/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.1.png b/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.1.png new file mode 100644 index 0000000000..0c56926753 Binary files /dev/null and b/deal.II/examples/step-32/doc/step-33.timestep.q2.beta=0.1.png differ diff --git a/deal.II/examples/step-32/step-31.cc b/deal.II/examples/step-32/step-31.cc new file mode 100644 index 0000000000..2936194255 --- /dev/null +++ b/deal.II/examples/step-32/step-31.cc @@ -0,0 +1,2116 @@ +/* $Id$ */ +/* Author: Wolfgang Bangerth, Texas A&M University, 2007 */ + +/* $Id$ */ +/* */ +/* Copyright (C) 2007, 2008 by the deal.II authors */ +/* */ +/* This file is subject to QPL and may not be distributed */ +/* without copyright and license information. Please refer */ +/* to the file deal.II/doc/license.html for the text and */ +/* further information on this license. */ + + // @sect3{Include files} + + // We include the functionality + // of these well-known deal.II + // library files and some C++ + // header files. 
+#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + + + // Next, we import all deal.II + // names into global namespace +using namespace dealii; + + + // @sect3{Equation data} + + // Again, the next stage in the program + // is the definition of the equation + // data, that is, the various + // boundary conditions, the right hand + // side and the initial condition (remember + // that we're about to solve a time- + // dependent system). The basic strategy + // for this definition is the same as in + // step-22. Regarding the details, though, + // there are some differences. + + // The first + // thing is that we don't set any boundary + // conditions on the velocity, as is + // explained in the introduction. So + // what is left are two conditions for + // pressure p and temperature + // T. + + // Secondly, we set an initial + // condition for all problem variables, + // i.e., for u, p and T, + // so the function has dim+2 + // components. + // In this case, we choose a very simple + // test case, where everything is zero. + + // @sect4{Boundary values} +namespace EquationData +{ + // define viscosity + const double eta = 1; + const double kappa = 1e-6; + + template + class PressureBoundaryValues : public Function + { + public: + PressureBoundaryValues () : Function(1) {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; + + + template + double + PressureBoundaryValues::value (const Point &/*p*/, + const unsigned int /*component*/) const + { + return 0; + } + + + + + + // @sect4{Initial values} + template + class TemperatureInitialValues : public Function + { + public: + TemperatureInitialValues () : Function(1) {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + + virtual void vector_value (const Point &p, + Vector &value) const; + }; + + + template + double + TemperatureInitialValues::value (const Point &, + const unsigned int) const + { + return 0; + } + + + template + void + TemperatureInitialValues::vector_value (const Point &p, + Vector &values) const + { + for (unsigned int c=0; cn_components; ++c) + values(c) = TemperatureInitialValues::value (p, c); + } + + + + // @sect4{Right hand side} + // + // The last definition of this kind + // is the one for the right hand + // side function. Again, the content + // of the function is very + // basic and zero in most of the + // components, except for a source + // of temperature in some isolated + // regions near the bottom of the + // computational domain, as is explained + // in the problem description in the + // introduction. + template + class TemperatureRightHandSide : public Function + { + public: + TemperatureRightHandSide () : Function(1) {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + + virtual void vector_value (const Point &p, + Vector &value) const; + }; + + + template + double + TemperatureRightHandSide::value (const Point &p, + const unsigned int /*component*/) const + { + static const Point source_centers[3] + = { (dim == 2 ? Point(.3,.1) : Point(.3,.5,.1)), + (dim == 2 ? Point(.45,.1) : Point(.45,.5,.1)), + (dim == 2 ? 
Point(.75,.1) : Point(.75,.5,.1)) }; + static const double source_radius + = (dim == 2 ? 1./32 : 1./8); + + return ((source_centers[0].distance (p) < source_radius) + || + (source_centers[1].distance (p) < source_radius) + || + (source_centers[2].distance (p) < source_radius) + ? + 1 + : + 0); + } + + + template + void + TemperatureRightHandSide::vector_value (const Point &p, + Vector &values) const + { + for (unsigned int c=0; cn_components; ++c) + values(c) = TemperatureRightHandSide::value (p, c); + } +} + + + + // @sect3{Linear solvers and preconditioners} + + // This section introduces some + // objects that are used for the + // solution of the linear equations of + // Stokes system that we need to + // solve in each time step. The basic + // structure is still the same as + // in step-20, where Schur complement + // based preconditioners and solvers + // have been introduced, with the + // actual interface taken from step-22. +namespace LinearSolvers +{ + + // @sect4{The InverseMatrix class template} + + // This class is an interface to + // calculate the action of an + // "inverted" matrix on a vector + // (using the vmult + // operation) + // in the same way as the corresponding + // function in step-22: when the + // product of an object of this class + // is requested, we solve a linear + // equation system with that matrix + // using the CG method, accelerated + // by a preconditioner of (templated) class + // Preconditioner. + template + class InverseMatrix : public Subscriptor + { + public: + InverseMatrix (const Matrix &m, + const Preconditioner &preconditioner); + + + void vmult (TrilinosWrappers::Vector &dst, + const TrilinosWrappers::Vector &src) const; + + private: + const SmartPointer matrix; + const Preconditioner &preconditioner; + }; + + + template + InverseMatrix::InverseMatrix (const Matrix &m, + const Preconditioner &preconditioner) + : + matrix (&m), + preconditioner (preconditioner) + {} + + + + template + void InverseMatrix::vmult ( + TrilinosWrappers::Vector &dst, + const TrilinosWrappers::Vector &src) const + { + SolverControl solver_control (src.size(), 1e-6*src.l2_norm()); + SolverCG cg (solver_control); + + dst = 0; + + try + { + cg.solve (*matrix, dst, src, preconditioner); + } + catch (std::exception &e) + { + Assert (false, ExcMessage(e.what())); + } + } + + // @sect4{Schur complement preconditioner} + + // This is the implementation + // of the Schur complement + // preconditioner as described + // in the section on improved + // solvers in step-22. + // + // The basic + // concept of the preconditioner is + // different to the solution + // strategy used in step-20 and + // step-22. There, the Schur + // complement was used for a + // two-stage solution of the linear + // system. Recall that the process + // in the Schur complement solver is + // a Gaussian elimination of + // a 2x2 block matrix, where each + // block is solved iteratively. + // Here, the idea is to let + // an iterative solver act on the + // whole system, and to use + // a Schur complement for + // preconditioning. As usual when + // dealing with preconditioners, we + // don't intend to exacly set up a + // Schur complement, but rather use + // a good approximation to the + // Schur complement for the purpose of + // preconditioning. + // + // So the question is how we can + // obtain a good preconditioner. 
+ // Let's have a look at the + // preconditioner matrix P + // acting on the block system, built + // as + // @f{eqnarray*} + // P^{-1} + // = + // \left(\begin{array}{cc} + // A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1} + // \end{array}\right) + // @f} + // using the Schur complement + // $S = B A^{-1} B^T$. If we apply + // this matrix in the solution of + // a linear system, convergence of + // an iterative Krylov-based solver + // will be governed by the matrix + // @f{eqnarray*} + // P^{-1}\left(\begin{array}{cc} + // A & B^T \\ B & 0 + // \end{array}\right) + // = + // \left(\begin{array}{cc} + // I & A^{-1} B^T \\ 0 & 0 + // \end{array}\right), + // @f} + // which turns out to be very simple. + // A GMRES solver based on exact + // matrices would converge in two + // iterations, since there are + // only two distinct eigenvalues. + // Such a preconditioner for the + // blocked Stokes system has been + // proposed by Silvester and Wathen, + // Fast iterative solution of + // stabilised Stokes systems part II. + // Using general block preconditioners. + // (SIAM J. Numer. Anal., 31 (1994), + // pp. 1352-1367). + // + // The deal.II users who have already + // gone through the step-20 and step-22 + // tutorials can certainly imagine + // how we're going to implement this. + // We replace the inverse matrices + // in $P^{-1}$ using the InverseMatrix + // class, and the inverse Schur + // complement will be approximated + // by the pressure mass matrix $M_p$. + // Having this in mind, we define a + // preconditioner class with a + // vmult functionality, + // which is all we need for the + // interaction with the usual solver + // functions further below in the + // program code. + // + // First the declarations. These + // are similar to the definition of + // the Schur complement in step-20, + // with the difference that we need + // some more preconditioners in + // the constructor. + template + class BlockSchurPreconditioner : public Subscriptor + { + public: + BlockSchurPreconditioner ( + const TrilinosWrappers::BlockSparseMatrix &S, + const InverseMatrix &Mpinv, + const PreconditionerA &Apreconditioner); + + void vmult (TrilinosWrappers::BlockVector &dst, + const TrilinosWrappers::BlockVector &src) const; + + private: + const SmartPointer stokes_matrix; + const SmartPointer > m_inverse; + const PreconditionerA &a_preconditioner; + + mutable TrilinosWrappers::Vector tmp; + +}; + + + + template + BlockSchurPreconditioner:: + BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S, + const InverseMatrix &Mpinv, + const PreconditionerA &Apreconditioner) + : + stokes_matrix (&S), + m_inverse (&Mpinv), + a_preconditioner (Apreconditioner), + tmp (stokes_matrix->block(1,1).row_map) + {} + + + // This is the vmult + // function. We implement + // the action of $P^{-1}$ as described + // above in three successive steps. + // The first step multiplies + // the velocity vector by a + // preconditioner of the matrix A. + // The resuling velocity vector + // is then multiplied by $B$ and + // subtracted from the pressure. + // This second step only acts on + // the pressure vector and is + // accomplished by the command + // SparseMatrix::residual. Next, + // we change the sign in the + // temporary pressure vector and + // finally multiply by the pressure + // mass matrix to get the final + // pressure vector. 
+ template + void BlockSchurPreconditioner::vmult ( + TrilinosWrappers::BlockVector &dst, + const TrilinosWrappers::BlockVector &src) const + { + a_preconditioner.vmult (dst.block(0), src.block(0)); + stokes_matrix->block(1,0).residual(tmp, dst.block(0), src.block(1)); + tmp *= -1; + m_inverse->vmult (dst.block(1), tmp); + } +} + + + + // @sect3{The BoussinesqFlowProblem class template} + + // The definition of this class is + // mainly based on the step-22 tutorial + // program. Most of the data types are + // the same as there. However, we + // deal with a time-dependent system now, + // and there is temperature to take care + // of as well, so we need some additional + // function and variable declarations. + // Furthermore, we have a slightly more + // sophisticated solver we are going to + // use, so there is a second pointer + // to a sparse ILU for a pressure + // mass matrix as well. +template +class BoussinesqFlowProblem +{ + public: + BoussinesqFlowProblem (); + void run (); + + private: + void setup_dofs (); + void assemble_stokes_preconditioner (); + void build_stokes_preconditioner (); + void assemble_stokes_system (); + void assemble_temperature_system (); + void assemble_temperature_matrix (); + double get_maximal_velocity () const; + std::pair get_extrapolated_temperature_range () const; + void solve (); + void output_results () const; + void refine_mesh (const unsigned int max_grid_level); + + static + double + compute_viscosity(const std::vector &old_temperature, + const std::vector &old_old_temperature, + const std::vector > &old_temperature_grads, + const std::vector > &old_old_temperature_grads, + const std::vector > &old_temperature_hessians, + const std::vector > &old_old_temperature_hessians, + const std::vector > &present_stokes_values, + const std::vector &gamma_values, + const double global_u_infty, + const double global_T_variation, + const double global_Omega_diameter, + const double cell_diameter, + const double old_time_step); + + + Epetra_SerialComm trilinos_communicator; + + Triangulation triangulation; + + const unsigned int stokes_degree; + FESystem stokes_fe; + DoFHandler stokes_dof_handler; + ConstraintMatrix stokes_constraints; + + std::vector stokes_partitioner; + TrilinosWrappers::BlockSparseMatrix stokes_matrix; + TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix; + + TrilinosWrappers::BlockVector stokes_solution; + TrilinosWrappers::BlockVector stokes_rhs; + + + const unsigned int temperature_degree; + FE_Q temperature_fe; + DoFHandler temperature_dof_handler; + ConstraintMatrix temperature_constraints; + + Epetra_Map temperature_partitioner; + TrilinosWrappers::SparseMatrix temperature_mass_matrix; + TrilinosWrappers::SparseMatrix temperature_stiffness_matrix; + TrilinosWrappers::SparseMatrix temperature_matrix; + + TrilinosWrappers::Vector temperature_solution; + TrilinosWrappers::Vector old_temperature_solution; + TrilinosWrappers::Vector old_old_temperature_solution; + TrilinosWrappers::Vector temperature_rhs; + + + double time_step; + double old_time_step; + unsigned int timestep_number; + + boost::shared_ptr Amg_preconditioner; + boost::shared_ptr Mp_preconditioner; + + bool rebuild_stokes_matrix; + bool rebuild_temperature_matrices; + bool rebuild_stokes_preconditioner; +}; + + + // @sect3{BoussinesqFlowProblem class implementation} + + // @sect4{BoussinesqFlowProblem::BoussinesqFlowProblem} + // + // The constructor of this class is + // an extension of the constructor + // in step-22. 
We need to include + // the temperature in the definition + // of the finite element. As discussed + // in the introduction, we are going + // to use discontinuous elements + // of one degree less than for pressure + // there. Moreover, we initialize + // the time stepping as well as the + // options for the matrix assembly + // and preconditioning. +template +BoussinesqFlowProblem::BoussinesqFlowProblem () + : + triangulation (Triangulation::maximum_smoothing), + + stokes_degree (1), + stokes_fe (FE_Q(stokes_degree+1), dim, + FE_Q(stokes_degree), 1), + stokes_dof_handler (triangulation), + + temperature_degree (2), + temperature_fe (temperature_degree), + temperature_dof_handler (triangulation), + + temperature_partitioner (0, 0, trilinos_communicator), + + time_step (0), + old_time_step (0), + timestep_number (0), + rebuild_stokes_matrix (true), + rebuild_temperature_matrices (true), + rebuild_stokes_preconditioner (true) +{} + + + + // @sect4{BoussinesqFlowProblem::get_maximal_velocity} +template +double BoussinesqFlowProblem::get_maximal_velocity () const +{ + const QGauss quadrature_formula(stokes_degree+2); + const unsigned int n_q_points = quadrature_formula.size(); + + FEValues fe_values (stokes_fe, quadrature_formula, update_values); + std::vector > stokes_values(n_q_points, + Vector(dim+1)); + double max_velocity = 0; + + typename DoFHandler::active_cell_iterator + cell = stokes_dof_handler.begin_active(), + endc = stokes_dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + fe_values.get_function_values (stokes_solution, stokes_values); + + for (unsigned int q=0; q velocity; + for (unsigned int i=0; i +std::pair +BoussinesqFlowProblem::get_extrapolated_temperature_range () const +{ + QGauss quadrature_formula(temperature_degree+2); + const unsigned int n_q_points = quadrature_formula.size(); + + FEValues fe_values (temperature_fe, quadrature_formula, + update_values); + std::vector old_temperature_values(n_q_points); + std::vector old_old_temperature_values(n_q_points); + + double min_temperature = (1. 
+ time_step/old_time_step) * + old_temperature_solution.linfty_norm() + + + time_step/old_time_step * + old_old_temperature_solution.linfty_norm(), + max_temperature = -min_temperature; + + typename DoFHandler::active_cell_iterator + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + fe_values.get_function_values (old_temperature_solution, old_temperature_values); + fe_values.get_function_values (old_old_temperature_solution, old_old_temperature_values); + + for (unsigned int q=0; q +double +BoussinesqFlowProblem:: +compute_viscosity(const std::vector &old_temperature, + const std::vector &old_old_temperature, + const std::vector > &old_temperature_grads, + const std::vector > &old_old_temperature_grads, + const std::vector > &old_temperature_hessians, + const std::vector > &old_old_temperature_hessians, + const std::vector > &present_stokes_values, + const std::vector &gamma_values, + const double global_u_infty, + const double global_T_variation, + const double global_Omega_diameter, + const double cell_diameter, + const double old_time_step) +{ + const double beta = 0.015 * dim; + const double alpha = 1; + + if (global_u_infty == 0) + return 5e-3 * cell_diameter; + + const unsigned int n_q_points = old_temperature.size(); + + // Stage 1: calculate residual + double max_residual = 0; + double max_velocity = 0; + + for (unsigned int q=0; q < n_q_points; ++q) + { + Tensor<1,dim> u; + for (unsigned int d=0; dsetup_matrices + // that decides whether to + // recreate the sparsity pattern + // and the associated stiffness + // matrix. + // + // The body starts by assigning dofs on + // basis of the chosen finite element, + // and then renumbers the dofs + // first using the Cuthill_McKee + // algorithm (to generate a good + // quality ILU during the linear + // solution process) and then group + // components of velocity, pressure + // and temperature together. This + // happens in complete analogy to + // step-22. + // + // We then proceed with the generation + // of the hanging node constraints + // that arise from adaptive grid + // refinement. Next we impose + // the no-flux boundary conditions + // $\vec{u}\cdot \vec{n}=0$ by adding + // a respective constraint to the + // hanging node constraints + // matrix. The second parameter in + // the function describes the first + // of the velocity components + // in the total dof vector, which is + // zero here. The parameter + // no_normal_flux_boundaries + // sets the no flux b.c. to those + // boundaries with boundary indicator + // zero. 
+template +void BoussinesqFlowProblem::setup_dofs () +{ + std::vector stokes_block_component (dim+1,0); + stokes_block_component[dim] = 1; + + { + stokes_dof_handler.distribute_dofs (stokes_fe); + DoFRenumbering::Cuthill_McKee (stokes_dof_handler); + DoFRenumbering::component_wise (stokes_dof_handler, stokes_block_component); + + stokes_constraints.clear (); + DoFTools::make_hanging_node_constraints (stokes_dof_handler, + stokes_constraints); + std::set no_normal_flux_boundaries; + no_normal_flux_boundaries.insert (0); + VectorTools::compute_no_normal_flux_constraints (stokes_dof_handler, 0, + no_normal_flux_boundaries, + stokes_constraints); + stokes_constraints.close (); + } + { + temperature_dof_handler.distribute_dofs (temperature_fe); + DoFRenumbering::Cuthill_McKee (temperature_dof_handler); + + temperature_constraints.clear (); + DoFTools::make_hanging_node_constraints (temperature_dof_handler, + temperature_constraints); + temperature_constraints.close (); + } + + std::vector stokes_dofs_per_block (2); + DoFTools::count_dofs_per_block (stokes_dof_handler, stokes_dofs_per_block, + stokes_block_component); + + const unsigned int n_u = stokes_dofs_per_block[0], + n_p = stokes_dofs_per_block[1], + n_T = temperature_dof_handler.n_dofs(); + + std::cout << "Number of active cells: " + << triangulation.n_active_cells() + << " (on " + << triangulation.n_levels() + << " levels)" + << std::endl + << "Number of degrees of freedom: " + << n_u + n_p + n_T + << " (" << n_u << '+' << n_p << '+'<< n_T <<')' + << std::endl + << std::endl; + + + + // The next step is to + // create the sparsity + // pattern for the system matrix + // based on the Boussinesq + // system. As in step-22, + // we choose to create the + // pattern not as in the + // first tutorial programs, + // but by using the blocked + // version of + // CompressedSetSparsityPattern. + // The reason for doing this + // is mainly a memory issue, + // that is, the basic procedures + // consume too much memory + // when used in three spatial + // dimensions as we intend + // to do for this program. + // + // So, in case we need + // to recreate the matrices, + // we first release the + // stiffness matrix from the + // sparsity pattern and then + // set up an object of the + // BlockCompressedSetSparsityPattern + // consisting of three blocks. + // Each of these blocks is + // initialized with the + // respective number of + // degrees of freedom. + // Once the blocks are + // created, the overall size + // of the sparsity pattern + // is initiated by invoking + // the collect_sizes() + // command, and then the + // sparsity pattern can be + // filled with information. + // Then, the hanging + // node constraints are applied + // to the temporary sparsity + // pattern, which is finally + // then completed and copied + // into the general sparsity + // pattern structure. + + // Observe that we use a + // coupling argument for + // telling the function + // make_stokes_sparsity_pattern + // which components actually + // will hold data and which + // we're going to neglect. + // + // After these actions, we + // need to reassign the + // system matrix structure to + // the sparsity pattern. 
+ stokes_partitioner.clear(); + { + Epetra_Map map_u(n_u, 0, trilinos_communicator); + stokes_partitioner.push_back (map_u); + Epetra_Map map_p(n_p, 0, trilinos_communicator); + stokes_partitioner.push_back (map_p); + } + { + stokes_matrix.clear (); + + BlockCompressedSetSparsityPattern csp (2,2); + + csp.block(0,0).reinit (n_u, n_u); + csp.block(0,1).reinit (n_u, n_p); + csp.block(1,0).reinit (n_p, n_u); + csp.block(1,1).reinit (n_p, n_p); + + csp.collect_sizes (); + + Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); + + // build the sparsity + // pattern. note that all dim + // velocities couple with each + // other and with the pressures, + // but that there is no + // pressure-pressure coupling: + for (unsigned int c=0; c coupling (dim+1, dim+1); + for (unsigned int c=0; c +void +BoussinesqFlowProblem::assemble_stokes_preconditioner () +{ + stokes_preconditioner_matrix = 0; + + QGauss quadrature_formula(stokes_degree+2); + FEValues stokes_fe_values (stokes_fe, quadrature_formula, + update_JxW_values | + update_values | + update_gradients); + const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; + + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); + std::vector local_dof_indices (dofs_per_cell); + + std::vector > phi_grad_u (dofs_per_cell); + std::vector phi_p (dofs_per_cell); + + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + typename DoFHandler::active_cell_iterator + cell = stokes_dof_handler.begin_active(), + endc = stokes_dof_handler.end(); + for (; cell!=endc; ++cell) + { + stokes_fe_values.reinit (cell); + local_matrix = 0; + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + stokes_constraints.distribute_local_to_global (local_matrix, + local_dof_indices, + stokes_preconditioner_matrix); + } + stokes_preconditioner_matrix.compress(); +} + + + +template +void +BoussinesqFlowProblem::build_stokes_preconditioner () +{ + if (rebuild_stokes_preconditioner == false) + return; + + std::cout << " Rebuilding Stokes preconditioner..." << std::flush; + + + // This last step of the assembly + // function sets up the preconditioners + // used for the solution of the + // system. We are going to use an + // ILU preconditioner for the + // velocity block (to be used + // by BlockSchurPreconditioner class) + // as well as an ILU preconditioner + // for the inversion of the + // pressure mass matrix. Recall that + // the velocity-velocity block sits + // at position (0,0) in the + // global system matrix, and + // the pressure mass matrix in + // (1,1). The + // storage of these objects is + // as in step-22, that is, we + // include them using a + // shared pointer structure from the + // boost library. + assemble_stokes_preconditioner (); + + Amg_preconditioner = boost::shared_ptr + (new TrilinosWrappers::PreconditionAMG()); + + std::vector > null_space; + std::vector velocity_components (dim+1,true); + velocity_components[dim] = false; + DoFTools::extract_constant_modes (stokes_dof_handler, velocity_components, + null_space); + Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0,0), + true, true, null_space, false); + + // TODO: we could throw away the (0,0) + // block here since things have been + // copied over to Trilinos. 
we need to + // keep the (1,1) block, though + + Mp_preconditioner = boost::shared_ptr + (new TrilinosWrappers::PreconditionSSOR( + stokes_preconditioner_matrix.block(1,1),1.2)); + + std::cout << std::endl; + + rebuild_stokes_preconditioner = false; +} + + + + // @sect4{BoussinesqFlowProblem::assemble_stokes_system} + // + // The assembly of the Boussinesq + // system is acutally a two-step + // procedure. One is to create + // the Stokes system matrix and + // right hand side for the + // velocity-pressure system as + // well as the mass matrix for + // temperature, and + // the second is to create the + // rhight hand side for the temperature + // dofs. The reason for doing this + // in two steps is simply that + // the time stepping we have chosen + // needs the result from the Stokes + // system at the current time step + // for building the right hand + // side of the temperature equation. + // + // This function does the + // first of these two tasks. + // There are two different situations + // for calling this function. The + // first one is when we reset the + // mesh, and both the matrix and + // the right hand side have to + // be generated. The second situation + // only sets up the right hand + // side. The reason for having + // two different accesses is that + // the matrix of the Stokes system + // does not change in time unless + // the mesh is changed, so we can + // save a considerable amount of + // work by doing the full assembly + // only when it is needed. + // + // Regarding the technical details + // of implementation, not much has + // changed from step-22. We reset + // matrix and vector, create + // a quadrature formula on the + // cells and one on cell faces + // (for implementing Neumann + // boundary conditions). Then, + // we create a respective + // FEValues object for both the + // cell and the face integration. + // For the the update flags of + // the first, we perform the + // calculations of basis function + // derivatives only in + // case of a full assembly, since + // they are not needed otherwise, + // which makes the call of + // the FEValues::reinit function + // further down in the program + // more efficient. + // + // The declarations proceed + // with some shortcuts for + // array sizes, the creation of + // the local matrix and right + // hand side as well as the + // vector for the indices of + // the local dofs compared to + // the global system. +template +void BoussinesqFlowProblem::assemble_stokes_system () +{ + std::cout << " Assembling..." << std::flush; + + if (rebuild_stokes_matrix == true) + stokes_matrix=0; + + stokes_rhs=0; + + QGauss quadrature_formula(stokes_degree+2); + QGauss face_quadrature_formula(stokes_degree+2); + + FEValues stokes_fe_values (stokes_fe, quadrature_formula, + update_values | + update_quadrature_points | + update_JxW_values | + (rebuild_stokes_matrix == true + ? 
+ update_gradients + : + UpdateFlags(0))); + + FEValues temperature_fe_values (temperature_fe, quadrature_formula, + update_values); + + FEFaceValues stokes_fe_face_values (stokes_fe, face_quadrature_formula, + update_values | + update_normal_vectors | + update_quadrature_points | + update_JxW_values); + + const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; + + const unsigned int n_q_points = quadrature_formula.size(); + const unsigned int n_face_q_points = face_quadrature_formula.size(); + + FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); + Vector local_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + // These few declarations provide + // the structures for the evaluation + // of inhomogeneous Neumann boundary + // conditions from the function + // declaration made above. + // The vector old_solution_values + // evaluates the solution + // at the old time level, since + // the temperature from the + // old time level enters the + // Stokes system as a source + // term in the momentum equation. + // + // Then, we create a variable + // to hold the Rayleigh number, + // the measure of buoyancy. + // + // The set of vectors we create + // next hold the evaluations of + // the basis functions that will + // be used for creating the + // matrices. This gives faster + // access to that data, which + // increases the performance + // of the assembly. See step-22 + // for details. + // + // The last few declarations + // are used to extract the + // individual blocks (velocity, + // pressure, temperature) from + // the total FE system. + const EquationData::PressureBoundaryValues pressure_boundary_values; + std::vector boundary_values (n_face_q_points); + + std::vector old_temperature_values(n_q_points); + + const double Rayleigh_number = 10; + + std::vector > phi_u (dofs_per_cell); + std::vector > grads_phi_u (dofs_per_cell); + std::vector div_phi_u (dofs_per_cell); + std::vector phi_p (dofs_per_cell); + + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + // Now start the loop over + // all cells in the problem. + // The first commands are all + // very familiar, doing the + // evaluations of the element + // basis functions, resetting + // the local arrays and + // getting the values of the + // old solution at the + // quadrature point. Then we + // are ready to loop over + // the quadrature points + // on the cell. + typename DoFHandler::active_cell_iterator + cell = stokes_dof_handler.begin_active(), + endc = stokes_dof_handler.end(); + typename DoFHandler::active_cell_iterator + temperature_cell = temperature_dof_handler.begin_active(); + + for (; cell!=endc; ++cell, ++temperature_cell) + { + stokes_fe_values.reinit (cell); + temperature_fe_values.reinit (temperature_cell); + + local_matrix = 0; + local_rhs = 0; + + temperature_fe_values.get_function_values (old_temperature_solution, old_temperature_values); + + for (unsigned int q=0; qrebuild_matrices + // flag. + for (unsigned int k=0; k gravity = ( (dim == 2) ? 
(Point (0,1)) : + (Point (0,0,1)) ); + for (unsigned int i=0; i::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + stokes_fe_face_values.reinit (cell, face_no); + + pressure_boundary_values + .value_list (stokes_fe_face_values.get_quadrature_points(), + boundary_values); + + for (unsigned int q=0; q + phi_i_u = stokes_fe_face_values[velocities].value (i, q); + + local_rhs(i) += -(phi_i_u * + stokes_fe_face_values.normal_vector(q) * + boundary_values[q] * + stokes_fe_face_values.JxW(q)); + } + } + + // The last step in the loop + // over all cells is to + // enter the local contributions + // into the global matrix and + // vector structures to the + // positions specified in + // local_dof_indices. + // Again, we only add the + // matrix data when it is + // requested. + cell->get_dof_indices (local_dof_indices); + + if (rebuild_stokes_matrix == true) + stokes_constraints.distribute_local_to_global (local_matrix, + local_dof_indices, + stokes_matrix); + + stokes_constraints.distribute_local_to_global (local_rhs, + local_dof_indices, + stokes_rhs); + } + stokes_matrix.compress(); + stokes_rhs.compress(); + + rebuild_stokes_matrix = false; + + std::cout << std::endl; +} + + + + + + + // @sect4{BoussinesqFlowProblem::assemble_temperature_system} + // + // This function does the second + // part of the assembly work, the + // creation of the velocity-dependent + // right hand side of the + // temperature equation. The + // declarations in this function + // are pretty much the same as the + // ones used in the other + // assembly routine, except that we + // restrict ourselves to vectors + // this time. Though, we need to + // perform more face integrals + // at this point, induced by the + // use of discontinuous elements for + // the temperature (just + // as it was in the first DG + // example in step-12) in combination + // with adaptive grid refinement + // and subfaces. The update + // flags at face level are the + // same as in step-12. +template +void BoussinesqFlowProblem::assemble_temperature_matrix () +{ + if (rebuild_temperature_matrices == false) + return; + + temperature_mass_matrix = 0; + temperature_stiffness_matrix = 0; + + QGauss quadrature_formula(temperature_degree+2); + FEValues temperature_fe_values (temperature_fe, quadrature_formula, + update_values | update_gradients | + update_JxW_values); + + const unsigned int dofs_per_cell = temperature_fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix local_mass_matrix (dofs_per_cell, dofs_per_cell); + FullMatrix local_stiffness_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + std::vector gamma_values (n_q_points); + + std::vector phi_T (dofs_per_cell); + std::vector > grad_phi_T (dofs_per_cell); + + // Now, let's start the loop + // over all cells in the + // triangulation. The first + // actions within the loop + // are, 0as usual, the evaluation + // of the FE basis functions + // and the old and present + // solution at the quadrature + // points. 
+ typename DoFHandler::active_cell_iterator + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); + for (; cell!=endc; ++cell) + { + local_mass_matrix = 0; + local_stiffness_matrix = 0; + + temperature_fe_values.reinit (cell); + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + + temperature_constraints.distribute_local_to_global (local_mass_matrix, + local_dof_indices, + temperature_mass_matrix); + temperature_constraints.distribute_local_to_global (local_stiffness_matrix, + local_dof_indices, + temperature_stiffness_matrix); + } + + rebuild_temperature_matrices = false; +} + + + + +template +void BoussinesqFlowProblem::assemble_temperature_system () +{ + const bool use_bdf2_scheme = (timestep_number != 0); + + if (use_bdf2_scheme == true) + { + temperature_matrix.copy_from (temperature_mass_matrix); + temperature_matrix *= (2*time_step + old_time_step) / + (time_step + old_time_step); + temperature_matrix.add (time_step, temperature_stiffness_matrix); + } + else + { + temperature_matrix.copy_from (temperature_mass_matrix); + temperature_matrix.add (time_step, temperature_stiffness_matrix); + } + + temperature_rhs = 0; + + QGauss quadrature_formula(temperature_degree+2); + FEValues temperature_fe_values (temperature_fe, quadrature_formula, + update_values | update_gradients | + update_hessians | + update_quadrature_points | update_JxW_values); + FEValues stokes_fe_values (stokes_fe, quadrature_formula, + update_values); + + const unsigned int dofs_per_cell = temperature_fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + Vector local_rhs (dofs_per_cell); + FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + // Here comes the declaration + // of vectors to hold the old + // and present solution values + // and gradients + // for both the cell as well as faces + // to the cell. Next comes the + // declaration of an object + // to hold the temperature + // boundary values and a + // well-known extractor for + // accessing the temperature + // part of the FE system. + std::vector > present_stokes_values (n_q_points, + Vector(dim+1)); + + + std::vector old_temperature_values (n_q_points); + std::vector old_old_temperature_values(n_q_points); + std::vector > old_temperature_grads(n_q_points); + std::vector > old_old_temperature_grads(n_q_points); + std::vector > old_temperature_hessians(n_q_points); + std::vector > old_old_temperature_hessians(n_q_points); + + + EquationData::TemperatureRightHandSide temperature_right_hand_side; + std::vector gamma_values (n_q_points); + + std::vector phi_T (dofs_per_cell); + std::vector > grad_phi_T (dofs_per_cell); + + const double global_u_infty = get_maximal_velocity(); + const std::pair + global_T_range = get_extrapolated_temperature_range(); + const double global_Omega_diameter = GridTools::diameter (triangulation); + + // Now, let's start the loop + // over all cells in the + // triangulation. The first + // actions within the loop + // are, 0as usual, the evaluation + // of the FE basis functions + // and the old and present + // solution at the quadrature + // points. 
+ typename DoFHandler::active_cell_iterator + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); + typename DoFHandler::active_cell_iterator + stokes_cell = stokes_dof_handler.begin_active(); + + for (; cell!=endc; ++cell, ++stokes_cell) + { + local_rhs = 0; + + temperature_fe_values.reinit (cell); + stokes_fe_values.reinit (stokes_cell); + + temperature_fe_values.get_function_values (old_temperature_solution, + old_temperature_values); + temperature_fe_values.get_function_values (old_old_temperature_solution, + old_old_temperature_values); + + temperature_fe_values.get_function_gradients (old_temperature_solution, + old_temperature_grads); + temperature_fe_values.get_function_gradients (old_old_temperature_solution, + old_old_temperature_grads); + + temperature_fe_values.get_function_hessians (old_temperature_solution, + old_temperature_hessians); + temperature_fe_values.get_function_hessians (old_old_temperature_solution, + old_old_temperature_hessians); + + temperature_right_hand_side.value_list (temperature_fe_values.get_quadrature_points(), + gamma_values); + + stokes_fe_values.get_function_values (stokes_solution, + present_stokes_values); + + const double nu + = compute_viscosity (old_temperature_values, + old_old_temperature_values, + old_temperature_grads, + old_old_temperature_grads, + old_temperature_hessians, + old_old_temperature_hessians, + present_stokes_values, + gamma_values, + global_u_infty, + global_T_range.second - global_T_range.first, + global_Omega_diameter, cell->diameter(), + old_time_step); + + for (unsigned int q=0; q old_grad_T = old_temperature_grads[q]; + const Tensor<1,dim> old_old_grad_T = old_old_temperature_grads[q]; + + + Tensor<1,dim> present_u; + for (unsigned int d=0; dget_dof_indices (local_dof_indices); + temperature_constraints.distribute_local_to_global (local_rhs, + local_dof_indices, + temperature_rhs); + } +} + + + + + // @sect4{BoussinesqFlowProblem::solve} +template +void BoussinesqFlowProblem::solve () +{ + std::cout << " Solving..." << std::endl; + + // Use the BlockMatrixArray structure + // for extracting only the upper left + // 2x2 blocks from the matrix that will + // be used for the solution of the + // blocked system. + { + // Set up inverse matrix for + // pressure mass matrix + LinearSolvers::InverseMatrix + mp_inverse (stokes_preconditioner_matrix.block(1,1), *Mp_preconditioner); + + LinearSolvers::BlockSchurPreconditioner + preconditioner (stokes_matrix, mp_inverse, *Amg_preconditioner); + + // Set up GMRES solver and + // solve. + SolverControl solver_control (stokes_matrix.m(), + 1e-6*stokes_rhs.l2_norm()); + + SolverGMRES gmres(solver_control, + SolverGMRES::AdditionalData(100)); + + //stokes_solution = 0; + gmres.solve(stokes_matrix, stokes_solution, stokes_rhs, preconditioner); + + std::cout << " " + << solver_control.last_step() + << " GMRES iterations for Stokes subsystem." 
+ << std::endl; + + // Produce a constistent solution + // field (we can't do this on the 'up' + // vector since it does not have the + // temperature component, but + // hanging_node_constraints has + // constraints also for the + // temperature vector) + stokes_constraints.distribute (stokes_solution); + } + + old_time_step = time_step; + time_step = 1./(std::sqrt(2.)*dim*std::sqrt(1.*dim)) / + temperature_degree * + GridTools::minimal_cell_diameter(triangulation) / + std::max (get_maximal_velocity(), .01); + + temperature_solution = old_temperature_solution; + + + assemble_temperature_system (); + { + + SolverControl solver_control (temperature_matrix.m(), + 1e-8*temperature_rhs.l2_norm()); + SolverCG cg (solver_control); + + TrilinosWrappers::PreconditionSSOR preconditioner (temperature_matrix, + 1.2); + cg.solve (temperature_matrix, temperature_solution, + temperature_rhs, + preconditioner); + + // produce a consistent temperature field + temperature_constraints.distribute (temperature_solution); + + std::cout << " " + << solver_control.last_step() + << " CG iterations for temperature." + << std::endl; + + double min_temperature = temperature_solution(0), + max_temperature = temperature_solution(0); + for (unsigned int i=0; i (min_temperature, + temperature_solution(i)); + max_temperature = std::max (max_temperature, + temperature_solution(i)); + } + + std::cout << " Temperature range: " + << min_temperature << ' ' << max_temperature + << std::endl; + } +} + + + + // @sect4{BoussinesqFlowProblem::output_results} +template +void BoussinesqFlowProblem::output_results () const +{ + if (timestep_number % 10 != 0) + return; + + const FESystem joint_fe (stokes_fe, 1, + temperature_fe, 1); + DoFHandler joint_dof_handler (triangulation); + joint_dof_handler.distribute_dofs (joint_fe); + Assert (joint_dof_handler.n_dofs() == + stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), + ExcInternalError()); + + Vector joint_solution (joint_dof_handler.n_dofs()); + + { + std::vector local_joint_dof_indices (joint_fe.dofs_per_cell); + std::vector local_stokes_dof_indices (stokes_fe.dofs_per_cell); + std::vector local_temperature_dof_indices (temperature_fe.dofs_per_cell); + + typename DoFHandler::active_cell_iterator + joint_cell = joint_dof_handler.begin_active(), + joint_endc = joint_dof_handler.end(), + stokes_cell = stokes_dof_handler.begin_active(), + temperature_cell = temperature_dof_handler.begin_active(); + for (; joint_cell!=joint_endc; ++joint_cell, ++stokes_cell, ++temperature_cell) + { + joint_cell->get_dof_indices (local_joint_dof_indices); + stokes_cell->get_dof_indices (local_stokes_dof_indices); + temperature_cell->get_dof_indices (local_temperature_dof_indices); + + for (unsigned int i=0; i joint_solution_names (dim, "velocity"); + joint_solution_names.push_back ("p"); + joint_solution_names.push_back ("T"); + + DataOut data_out; + + data_out.attach_dof_handler (joint_dof_handler); + + std::vector + data_component_interpretation + (dim+2, DataComponentInterpretation::component_is_scalar); + for (unsigned int i=0; i::type_dof_data, + data_component_interpretation); + data_out.build_patches (std::min(stokes_degree, temperature_degree)); + + std::ostringstream filename; + filename << "solution-" << Utilities::int_to_string(timestep_number, 4) << ".vtk"; + + std::ofstream output (filename.str().c_str()); + data_out.write_vtk (output); +} + + + + // @sect4{BoussinesqFlowProblem::refine_mesh} +template +void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) 
+{ + Vector estimated_error_per_cell (triangulation.n_active_cells()); + + KellyErrorEstimator::estimate (temperature_dof_handler, + QGauss(temperature_degree+1), + typename FunctionMap::type(), + temperature_solution, + estimated_error_per_cell); + + GridRefinement::refine_and_coarsen_fixed_fraction (triangulation, + estimated_error_per_cell, + 0.8, 0.1); + if (triangulation.n_levels() > max_grid_level) + for (typename Triangulation::active_cell_iterator + cell = triangulation.begin_active(max_grid_level); + cell != triangulation.end(); ++cell) + cell->clear_refine_flag (); + + std::vector x_solution (2); + x_solution[0].reinit (temperature_solution); + x_solution[0] = temperature_solution; + x_solution[1].reinit (temperature_solution); + x_solution[1] = old_temperature_solution; + + SolutionTransfer soltrans(temperature_dof_handler); + + triangulation.prepare_coarsening_and_refinement(); + soltrans.prepare_for_coarsening_and_refinement(x_solution); + + triangulation.execute_coarsening_and_refinement (); + setup_dofs (); + + std::vector tmp (2); + tmp[0].reinit (temperature_solution); + tmp[1].reinit (temperature_solution); + soltrans.interpolate(x_solution, tmp); + + temperature_solution = tmp[0]; + old_temperature_solution = tmp[1]; + + rebuild_stokes_matrix = true; + rebuild_temperature_matrices = true; + rebuild_stokes_preconditioner = true; +} + + + + // @sect4{BoussinesqFlowProblem::run} +template +void BoussinesqFlowProblem::run () +{ + const unsigned int initial_refinement = (dim == 2 ? 4 : 2); + const unsigned int n_pre_refinement_steps = (dim == 2 ? 4 : 3); + + + GridGenerator::hyper_cube (triangulation); + triangulation.refine_global (initial_refinement); + + setup_dofs(); + + unsigned int pre_refinement_step = 0; + + start_time_iteration: + + VectorTools::project (temperature_dof_handler, + temperature_constraints, + QGauss(temperature_degree+2), + EquationData::TemperatureInitialValues(), + old_temperature_solution); + + timestep_number = 0; + double time = 0; + + do + { + std::cout << "Timestep " << timestep_number + << ": t=" << time + << ", dt=" << time_step + << std::endl; + + assemble_stokes_system (); + build_stokes_preconditioner (); + assemble_temperature_matrix (); + + solve (); + + output_results (); + + std::cout << std::endl; + + if ((timestep_number == 0) && + (pre_refinement_step < n_pre_refinement_steps)) + { + refine_mesh (initial_refinement + n_pre_refinement_steps); + ++pre_refinement_step; + goto start_time_iteration; + } + else + if ((timestep_number > 0) && (timestep_number % 5 == 0)) + refine_mesh (initial_refinement + n_pre_refinement_steps); + + time += time_step; + ++timestep_number; + + old_old_temperature_solution = old_temperature_solution; + old_temperature_solution = temperature_solution; + } + while (time <= 100); +} + + + + // @sect3{The main function} +int main (int argc, char *argv[]) +{ +#ifdef DEAL_II_COMPILER_SUPPORTS_MPI + MPI_Init (&argc,&argv); +#else + (void)argc; + (void)argv; +#endif + + try + { + deallog.depth_console (0); + + BoussinesqFlowProblem<2> flow_problem; + flow_problem.run (); + } + catch (std::exception &exc) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + + return 1; + } + catch (...) 
+ { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Unknown exception!" << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + +#ifdef DEAL_II_COMPILER_SUPPORTS_MPI + MPI_Finalize(); +#endif + + return 0; +} diff --git a/deal.II/examples/step-4/Makefile b/deal.II/examples/step-4/Makefile index c277ba5a3a..f315b1daa1 100644 --- a/deal.II/examples/step-4/Makefile +++ b/deal.II/examples/step-4/Makefile @@ -14,7 +14,7 @@ target = $(basename $(shell echo step-*.cc)) # run-time checking of parameters and internal states is performed, so # you should set this value to `on' while you develop your program, # and to `off' when running production computations. -debug-mode = on +debug-mode = off # As third field, we need to give the path to the top-level deal.II