From: Luca Heltai Date: Mon, 23 Apr 2018 15:44:31 +0000 (+0200) Subject: First commented version of step-60 X-Git-Tag: v9.0.0-rc4~7^2~9 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=11b67bb50c7a76d21058b9c4d9f6dfeec3c21bf5;p=dealii.git First commented version of step-60 Co-authored-by: Giovanni Alzetta --- diff --git a/examples/step-60/CMakeLists.txt b/examples/step-60/CMakeLists.txt new file mode 100644 index 0000000000..99bfe40e4d --- /dev/null +++ b/examples/step-60/CMakeLists.txt @@ -0,0 +1,53 @@ +## +# CMake script for the step-60 tutorial program: +## + +# Set the name of the project and target: +SET(TARGET "step-60") + +# Declare all source files the target consists of. Here, this is only +# the one step-X.cc file, but as you expand your project you may wish +# to add other source files as well. If your project becomes much larger, +# you may want to either replace the following statement by something like +# FILE(GLOB_RECURSE TARGET_SRC "source/*.cc") +# FILE(GLOB_RECURSE TARGET_INC "include/*.h") +# SET(TARGET_SRC ${TARGET_SRC} ${TARGET_INC}) +# or switch altogether to the large project CMakeLists.txt file discussed +# in the "CMake in user projects" page accessible from the "User info" +# page of the documentation. +SET(TARGET_SRC + ${TARGET}.cc + ) + +# Usually, you will not need to modify anything beyond this point... + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12) + +FIND_PACKAGE(deal.II 9.0.0 QUIET + HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR} + ) +IF(NOT ${deal.II_FOUND}) + MESSAGE(FATAL_ERROR "\n" + "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n" + "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n" + "or set an environment variable \"DEAL_II_DIR\" that contains this path." + ) +ENDIF() + +# +# Are all dependencies fulfilled? +# +IF(NOT DEAL_II_WITH_UMFPACK) # keep in one line + MESSAGE(FATAL_ERROR " +Error! This tutorial requires a deal.II library that was configured with the following options: + DEAL_II_WITH_UMFPACK = ON +However, the deal.II library found at ${DEAL_II_PATH} was configured with these options + DEAL_II_WITH_UMFPACK = ${DEAL_II_WITH_UMFPACK} +which conflict with the requirements." + ) +ENDIF() + + +DEAL_II_INITIALIZE_CACHED_VARIABLES() +PROJECT(${TARGET}) +DEAL_II_INVOKE_AUTOPILOT() diff --git a/examples/step-60/doc/intro.dox b/examples/step-60/doc/intro.dox new file mode 100644 index 0000000000..7fd050893c --- /dev/null +++ b/examples/step-60/doc/intro.dox @@ -0,0 +1,205 @@ +
<i>This program was contributed by Luca Heltai and Giovanni Alzetta, SISSA, Trieste.</i>

<h1>Introduction</h1>

+ + +

<h3>Distributed Lagrange multipliers</h3>

In this tutorial we consider the case of two domains, $\Omega$ in
$R^{\text{spacedim}}$ and $\Gamma$ in $R^{\text{dim}}$, where $\Gamma$ is
embedded in $\Omega$ and thus $\Gamma \subseteq \Omega$. We want to solve a
partial differential equation on $\Omega$, enforcing some conditions on the
solution of the problem *on the embedded domain* $\Gamma$.

There are two interesting scenarios:

- the geometrical dimension `dim` of the embedded domain $\Gamma$ is the
same as that of the domain $\Omega$ (`spacedim`), that is, the spacedim-dimensional
measure of $\Gamma$ is not zero, or

- the embedded domain $\Gamma$ has an intrinsic dimension `dim` which is smaller
than that of $\Omega$ (`spacedim`), thus its spacedim-dimensional measure is
zero; for example, it is a curve embedded in a two-dimensional domain, or a
surface embedded in a three-dimensional domain.

In both cases, we define the restriction operator $\gamma$ as the operator
that, given a continuous function on $\Omega$, returns its (continuous)
restriction on $\Gamma$, i.e.,
\f[
\gamma : C^0(\Omega) \mapsto C^0(\Gamma), \quad \text{ s.t. } \gamma u = u|_{\Gamma} \in C^0(\Gamma),
\quad \forall u \in C^0(\Omega).
\f]

It is well known that the operator $\gamma$ can be extended to a continuous
operator on $H^1(\Omega)$, mapping functions in $H^1(\Omega)$ to functions in
$H^1(\Gamma)$, when the intrinsic dimension of $\Gamma$ is the same as that of
$\Omega$.

The same is true, with a less regular range space (namely $H^{1/2}(\Gamma)$),
when the dimension of $\Gamma$ is one less than that of $\Omega$, and
$\Gamma$ does not have a boundary. In this second case, the operator $\gamma$
is also known as the *trace* operator, and it is well defined for Lipschitz
co-dimension one curves and surfaces $\Gamma$ embedded in $\Omega$.

The co-dimension two case is a little more complicated, and in general it is
not possible to construct a continuous trace operator, not even from
$H^1(\Omega)$ to $L^2(\Gamma)$, when the dimension of $\Gamma$ is zero or one,
respectively, in two and three dimensions.

In this tutorial program we are not interested in further details on $\gamma$:
we take the extension $\gamma$ for granted, assuming that the dimension of the
embedded domain (`dim`) is always equal to, or one less than, the dimension
of the embedding domain $\Omega$ (`spacedim`).

We are going to solve the following differential problem: given a sufficiently
regular function $g$ on $\Gamma$, find the solution $u$ to

@f{eqnarray*}
- \Delta u + \gamma^T \lambda &=& 0 & \text{ in } \Omega\\
\gamma u &=& g & \text{ in } \Gamma \\
u & = & 0 & \text{ on } \partial\Omega.
@f}

This is a constrained problem, where we are looking for a harmonic function
$u$ that satisfies homogeneous boundary conditions on $\partial\Omega$, subject
to the constraint $\gamma u = g$, enforced through a Lagrange multiplier.

The variational formulation can be derived by introducing two infinite
dimensional spaces $V(\Omega)$ and $Q^*(\Gamma)$, respectively for the solution
$u$ and for the Lagrange multiplier $\lambda$.
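To spell out the intermediate step used in the derivation below: for any test
function $v$ that vanishes on $\partial\Omega$, integration by parts turns the
Laplace term into
\f[
-\int_\Omega (\Delta u)\, v = \int_\Omega \nabla u \cdot \nabla v
- \int_{\partial\Omega} (\nabla u \cdot n)\, v = (\nabla u, \nabla v)_\Omega,
\f]
which is the bilinear form that appears in the variational problem stated next.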
Multiplying the first equation by $v \in V(\Omega)$ and the second by $q \in
Q(\Gamma)$, integrating by parts when possible, and exploiting the boundary
conditions on $\partial\Omega$, we obtain the following variational problem:

Given a sufficiently regular function $g$ on $\Gamma$, find the solution $u$ to

@f{eqnarray*}
(\nabla u, \nabla v)_{\Omega} + (\lambda, \gamma v)_{\Gamma} &=& 0 & \forall v \in V(\Omega) \\
(\gamma u, q)_{\Gamma} &=& (g,q)_{\Gamma} & \forall q \in Q(\Gamma),
@f}

where $(\cdot, \cdot)_{\Omega}$ (respectively $(\cdot, \cdot)_{\Gamma}$)
denotes the $L^2$ scalar product on $\Omega$ (respectively on $\Gamma$).

Inspection of the variational formulation tells us that the space $V(\Omega)$
can be taken to be $H^1_0(\Omega)$. The space $Q(\Gamma)$, in the co-dimension
zero case, should be taken as $H^1(\Gamma)$, while in the co-dimension one case
it should be taken as $H^{1/2}(\Gamma)$.

The function $g$, therefore, should be either in $H^1(\Gamma)$ (for the
co-dimension zero case) or in $H^{1/2}(\Gamma)$ (for the co-dimension one case).
This leaves us with a Lagrange multiplier $\lambda$ in $Q^*(\Gamma)$, which is
either $(H^1(\Gamma))^*$ or $H^{-1/2}(\Gamma)$.

There are two options for the discretisation of the problem above. One could choose
matching discretisations, where the Triangulation for $\Gamma$ is aligned with the
Triangulation for $\Omega$, or one could choose to discretize the two domains in
a completely independent way.

While the first option is clearly better suited to the simple problem we
proposed above, if the domain $\Gamma$ were time dependent, then the
second option would be a more viable solution.

The technique we describe here goes by many names in the literature:
the **immersed finite element method**, the **fictitious boundary method**, the
**distributed Lagrange multiplier method**, and others. The main principle is
that the discretisation of the two grids and of the two finite element spaces
are kept completely independent. This technique is particularly efficient for
the simulation of fluid-structure interaction problems, where the configuration
of the embedded structure is part of the problem itself, and one solves a
(possibly non-linear) elastic problem to determine the (time dependent)
configuration of $\Gamma$, and a (possibly non-linear) flow problem in $\Omega
\setminus \Gamma$, plus coupling conditions on the interface between the fluid
and the solid.

In this tutorial program we keep things a little simpler, and we assume that the
configuration of the embedded domain is given in one of two possible ways:

- as a deformation mapping $\psi: \Gamma_0 \mapsto \Gamma \subseteq \Omega$,
defined on a continuous finite dimensional space on $\Gamma_0$ and representing,
for any point $x \in \Gamma_0$, its coordinate $\psi(x)$ in $\Omega$;

- as a displacement mapping $\delta \psi(x) = \psi(x)-x$ for $x\in \Gamma_0$,
representing for any point $x$ the displacement vector to apply in order to
deform $x$ to its actual configuration $\psi(x) = x +\delta\psi(x)$.

We define the embedded reference domain $\Gamma_0$ as the `embedded_grid`, and on
this domain we construct a finite dimensional space (`embedded_configuration_dh`)
to describe either the deformation or the displacement, through a FiniteElement
system of FE_Q objects (`embedded_configuration_fe`).
This finite dimensional
space is used only to interpolate a user supplied function
(`embedded_configuration_function`) representing either $\psi$ (if the
parameter `use_displacement` is set to false) or $\delta\psi$ (if the parameter
`use_displacement` is set to true).

The Lagrange multiplier $\lambda$ and the user supplied function $g$ are
defined through another finite dimensional space `embedded_dh`, and through
another FiniteElement `embedded_fe`, using the same reference domain. In
order to take into account the deformation of the domain, either a MappingFEField
or a MappingQEulerian object is initialized with the `embedded_configuration`
vector.

In the embedding space, a standard finite dimensional space `space_dh` is
constructed on the embedding grid `space_grid`, using the
FiniteElement `space_fe`, following almost verbatim what was done in step-6.

We represent the discretizations of the spaces $V$ and $Q$ with
\f[
V_h(\Omega) = \text{span} \{v_i\}_{i=1}^n
\f]
and
\f[
Q_h(\Gamma) = \text{span} \{q_i\}_{i=1}^m
\f]
respectively, where $n$ is the dimension of `space_dh`, and $m$
the dimension of `embedded_dh`.

Once all the finite dimensional spaces are defined, we are left with the following
finite dimensional system of equations:

\f[
\begin{pmatrix}
K & C^T \\
C & 0
\end{pmatrix}
\begin{pmatrix}
u \\
\lambda
\end{pmatrix}
=
\begin{pmatrix}
0 \\
G
\end{pmatrix}
\f]

where

@f{eqnarray*}
K_{ij} &:=& (\nabla v_j, \nabla v_i)_\Omega & i,j=1,\dots,n \\
C_{\alpha j} &:=& (v_j, q_\alpha)_\Gamma &j=1,\dots,n, \alpha = 1,\dots, m \\\\
G_{\alpha} &:=& (g, q_\alpha)_\Gamma & \alpha = 1,\dots, m.
@f}

While the matrix $K$ is the standard stiffness matrix for the Poisson problem
on $\Omega$, and the vector $G$ is a standard right-hand-side vector for a
finite element problem with forcing term $g$ on $\Gamma$ (see, for example,
step-3), the matrix $C$ and its transpose $C^T$ are non-standard since they
couple information on two non-matching grids.

In particular, the integral that appears in the computation of a single entry of
$C$ is computed on $\Gamma$. As usual in finite elements, we split this integral
into contributions from each cell $K$ of the triangulation used to discretize
$\Gamma$, transform the integral on $K$ to an integral on the reference element
$\hat K$, where $F_{K}$ is the mapping from $\hat K$ to $K$, and compute the
integral there using a quadrature formula:

\f[
C_{\alpha j} := (v_j, q_\alpha)_\Gamma = \sum_{K\in \Gamma} \int_{\hat K}
\hat q_\alpha(\hat x) (v_j \circ F_{K}) (\hat x) J_K (\hat x) \mathrm{d} \hat x =
\sum_{K\in \Gamma} \sum_{i=1}^{n_q} \big(\hat q_\alpha(\hat x_i) (v_j \circ F_{K}) (\hat x_i) J_K (\hat x_i) w_i \big).
\f]

We solve the above saddle point problem by iterating over the Schur complement
(which is described, for example, in step-20),
and we construct the complement using LinearOperator classes.
diff --git a/examples/step-60/doc/kind b/examples/step-60/doc/kind new file mode 100644 index 0000000000..c1d9154931 --- /dev/null +++ b/examples/step-60/doc/kind @@ -0,0 +1 @@ +techniques diff --git a/examples/step-60/doc/results.dox b/examples/step-60/doc/results.dox new file mode 100644 index 0000000000..9c7a88bb4d --- /dev/null +++ b/examples/step-60/doc/results.dox @@ -0,0 +1,37 @@ +

<h1>Results</h1>

+ +

<h3>Test case 1:</h3>

For the default problem, the value of $u$ on $\Gamma$ is 1, while on
$\partial\Omega$ it is 0. This means we expect the following solution:

(figure: the computed solution for the default test case)
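This test case uses the default parameter values. For reference, the following
is a sketch of the corresponding parameter file; the entry names and values are
the ones set in the program (through ParameterAcceptor::add_parameter() and the
declare_parameters_call_back hooks), but the file the program auto-generates
contains a few additional entries (for example, the variable names of the
parsed functions) and may order the entries differently:

@code
subsection Distributed Lagrange<1,2>
  set Coupling quadrature order                    = 3
  set Embedded configuration finite element degree = 1
  set Embedded space finite element degree         = 1
  set Embedding space finite element degree        = 1
  set Homogeneous Dirichlet boundary ids           = 0
  set Initial embedded space refinement            = 7
  set Initial embedding space refinement           = 4
  set Local refinements steps near embedded domain = 3
  set Use displacement in embedded interface       = false
  set Verbosity level                              = 10
  subsection Embedded configuration
    set Function constants  = R=.3, Cx=.4, Cy=.4
    set Function expression = R*cos(2*pi*x)+Cx; R*sin(2*pi*x)+Cy
  end
  subsection Embedded value
    set Function expression = 1
  end
  subsection Schur solver control
    set Max steps = 1000
    set Reduction = 1.e-12
    set Tolerance = 1.e-12
  end
end
@endcode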

<h3>Possibilities for extensions</h3>

+ +

<h4>Different Parameters</h4>

+ +

<h4>Parallel Code</h4>

The simple code proposed here can serve as a starting point for more
complex problems which, in order to be solved, need to be run in parallel,
possibly using distributed meshes (see step-17, step-40, and the
documentation for parallel::shared::Triangulation and
parallel::distributed::Triangulation).

When using non-matching grids in parallel, a problem arises:
to compute the matrix $C$ a process needs information about both
meshes on the same portion of real space but, when working with
distributed meshes, this might not be the case.
Various strategies can be implemented to tackle this problem:

- distribute the two meshes so that this constraint is satisfied
- use communication for the parts of real space where the constraint is
  not satisfied
- make use of a shared triangulation and a distributed triangulation

The last strategy is clearly the easiest to implement, as all
the functions used in this tutorial program can work letting $\Omega$
be distributed and $\Gamma$ be a shared triangulation.
diff --git a/examples/step-60/doc/tooltip b/examples/step-60/doc/tooltip new file mode 100644 index 0000000000..0bb91e23a0 --- /dev/null +++ b/examples/step-60/doc/tooltip @@ -0,0 +1 @@ +The fictitious domain method using distributed Lagrange multipliers diff --git a/examples/step-60/step-60.cc b/examples/step-60/step-60.cc new file mode 100644 index 0000000000..5483e95e1b --- /dev/null +++ b/examples/step-60/step-60.cc @@ -0,0 +1,1056 @@
/* ---------------------------------------------------------------------
 *
 * Copyright (C) 2018 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE at
 * the top level of the deal.II distribution.
 *
 * ---------------------------------------------------------------------
 *
 * Author: Luca Heltai, Giovanni Alzetta,
 * International School for Advanced Studies, Trieste, 2018
 */

// @sect3{Include files}
// Most of these have been introduced elsewhere, we'll comment only on the
// new ones.

#include
#include
#include

#include

// The parameter acceptor class is the first novelty of this tutorial program:
// in general, parameter files are used to steer the execution of a program
// at run time. While even a simple approach saves compilation time, as the
// same executable can be run with different parameter settings, it can become
// difficult to handle hundreds of parameters simultaneously while maintaining
// compatibility between different programs. This is where the class
// ParameterAcceptor proves useful.
//
// This class is used to define a public interface for classes that want to use
// a single global ParameterHandler to handle parameters. The class provides a
// static ParameterHandler member, namely ParameterAcceptor::prm, and
// implements the "Command design pattern" (see, for example, E. Gamma, R. Helm,
// R. Johnson, J. Vlissides, Design Patterns: Elements of Reusable
// Object-Oriented Software, Addison-Wesley Professional, 1994.
// https://goo.gl/FNYByc).
//
// ParameterAcceptor provides a global subscription mechanism.
Whenever an +// object of a class derived from ParameterAcceptor is constructed, a pointer +// to that object-of-derived-type is registered, together with a section entry +// in the parameter file. Such registry is traversed upon invocation of the +// single function ParameterAcceptor::initialize(file.prm) which in turn makes +// sure that all classes stored in the global register declare the parameters +// they will be using, and after having declared them, it reads the content of +// `file.prm` to parse the actual parameters. +// +// If you call the method ParameterHandler::add_parameter for each of the +// parameters you want to use in your code, there is nothing else you need to +// do. If you are using an already existing class that provides the two +// functions `declare_parameters` and `parse_parameters`, you can still use +// ParameterAcceptor, by encapsulating the existing class into a +// ParameterAcceptorProxy class. +// +// In this example, we'll use both strategies, using ParameterAcceptorProxy for +// deal.II classes, and deriving our own parameter classes directly from +// ParameterAcceptor. + +#include +#include +#include + +#include + +// The other new include file is the one that contains the GridTools::Cache +// class. The structure of deal.II, as many modern numerical libraries, is +// organized following a Directed Acyclic Graph (DAG). A DAG is a directed graph +// with topological ordering: each node structurally represents an object, and +// one or more directed edges represent how it can be used to generate new +// objects. This has several advantages, but intrinsically creates “asymmetries” +// as certain operations are fast unlike their inverse. For example, in deal.II +// finding the vertices of a cell has very low computational cost, while finding +// all the cells that share a vertex requires a non-trivial computation unless a +// new data structure is added. +// +// Since inverse operations are usually not needed in a finite element code, +// these are implemented in GridTools without the use of extra data structures +// related to the Triangulation which would make them much faster. One such data +// structure, for example, is a map from the vertices of a Triangulation to all +// cells that share those vertices, which would reduce the computations needed +// to answer to the previous example. +// +// Some methods, for example GridTools::find_active_cell_around_point, make +// heavy usage of these non-standard operations. If you need to call these +// methods more than once, it becomes convenient to store those data structures +// somewhere. GridTools::Cache does exactly this, giving you access to +// previously computed objects, or computing them on the fly (and then storing +// them inside the class for later use), and making sure that whenever the +// Triangulation is updated, also the relevant data strucutres are recomputed. + +#include +#include +#include + +#include +#include + +// In this example, we will be using a reference domain to describe an embedded +// Triangulation, deformed through a finite element vector field. +// +// The two include files above contain the definition of two classes that can be +// used in these cases. MappingQEulerian allows one to describe a domain through +// a *displacement* field, based on a FESystem[FE_Q(p)^spacedim] finite element +// space. The second is a little more generic, and allows you to use arbitrary +// vector FiniteElement spaces, as long as they provide a *continuous* +// description of your domain. 
In this case, the description is done through the +// actual *deformation* field, rather than a *displacement* field. +// +// Which one is used depends on how the user wants to specify the reference +// domain, and/or the actual configuration. We'll provide both options, and +// experiment a little in the results section of this tutorial program. + +#include + +#include + +// The parsed function class is another new entry. It allows one to create a +// Function object, starting from a string in a parameter file which is parsed +// into an object that you can use anywhere deal.II accepts a Function (for +// example, for interpolation, boundary conditions, etc.). + +#include +#include +#include + +#include + +// This is the last new entry for this tutorial program. The namespace +// NonMatching contains a few methods that are useful when performing +// computations on non-matching grids, or on curves that are not aligned with +// the underlying mesh. +// +// We'll discuss its use in details later on in the `setup_coupling` method. + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace Step60 +{ + using namespace dealii; + + // In the DistributedLagrangeProblem, we need two parameters describing the + // dimensions of the domain $\Gamma$ (`dim`) and of the domain $\Omega$ + // (`spacedim`). + // + // These will be used to initialize a Triangulation (for + // $\Gamma$) and a Triangulation (for $\Omega$). + // + // A novelty w.r.t. other tutorial programs is the heavy use of + // std::unique_ptr. These behave like classical pointers, with the advantage + // of doing automatic house-keeping: the contained object is automatically + // destroyed as soon as the unique_ptr goes out of scope, even if it is inside + // a container or there's an exception. Moreover it does not allow for + // duplicate pointers, which prevents ownership problems. We do this, because + // we want to be able to i) construct the problem, ii) read the parameters, + // and iii) initialize all objects according to what is specified in a + // parameter file. + // + // We construct the parameters of our problem in the internal class + // DistributedLagrangeProblemParameters, derived from ParameterAcceptor. The + // DistributedLagrangeProblem class takes a const reference to a + // DistributedLagrangeProblemParameters object, so that it is not possible to + // modify the parameters from within the DistributedLagrangeProblem class + // itself. + // + // We could have initialized the parameters first, and then pass the + // parameters to the DistributedLagrangeProblem assuming all entries are set to + // the desired values, but this has two disadvantages: + // + // - we should not make assumptions on how the user initializes a class that + // is not under our direct control. If the user fails to initialize the + // class, we should notice and throw an exception; + // + // - not all objects that need to read parameters from a parameter file may + // be available when we construct the DistributedLagrangeProblemParameters; + // this is often the case for complex programs, with multiple physics, or + // where we reuse existing code in some external classes. We simulate this by + // keeping some "complex" objects, like ParsedFunction objects, inside the + // DistributedLagrangeProblem instead of inside the + // DistributedLagrangeProblemParameters. + // + // Here we assume that upon construction, the classes that build up our + // problem are not usable yet. 
Parsing the parameter file is what ensures we have + // all ingredients to build up our classes, and we design them so that if parsing + // fails, or is not executed, the run is aborted. + + template + class DistributedLagrangeProblem + { + public: + + // The DistributedLagrangeProblemParameters is derived from + // ParameterAcceptor. This allows us to use the + // ParameterAcceptor::add_parameter methods in its constructor. + // + // The members of this function are all non-const, but the + // DistributedLagrangeProblem class takes a const reference to a + // DistributedLagrangeProblemParameters object: this ensures that + // parameters are not modified from within the DistributedLagrangeProblem + // class. + + class DistributedLagrangeProblemParameters : public ParameterAcceptor + { + public: + DistributedLagrangeProblemParameters(); + + // The parameters now described can all be set externally + // using a parameter file: if no parameter file is present when running + // the executable, the program shall create a parameters.prm file with + // the default parameters here encoded, and then abort as no + // parameters.prm file was found. + + // Initial refinement for the embedding grid, corresponding to the domain + // $\Omega$. + unsigned int initial_refinement = 4; + + // The interaction between the embedded grid $\Omega$ and the embedding + // grid $\Gamma$ is handled throught the computation of $C$, which + // involves all cells of $\Omega$ overlapping with parts of $\Gamma$: + // a higher refinement of such cells might improve the results quality. + // For this reason we define `delta_refinement`: if it is greater + // than zero, then we mark each cell of the space grid that contains + // a vertex of the embedded grid, execute the refinement, and repeat + // this process `delta_refinement` times. + unsigned int delta_refinement = 3; + + // Starting refinement of the embedded grid, corresponding to the domain + // $\Gamma$. + unsigned int initial_embedded_refinement = 7; + + // A list of boundary ids where we impose homogeneous Dirichlet boundary + // conditions. 
On the remaining boundary ids (if any), we impose + // homogeneous Neumann boundary conditions + std::list homogeneous_dirichlet_ids {0}; + + // FiniteElement degree of the embedding space: $V_h(\Omega)$ + unsigned int embedding_space_finite_element_degree = 1; + + // FiniteElement degree of the embedded space: $Q_h(\Gamma)$ + unsigned int embedded_space_finite_element_degree = 1; + + // FiniteElement degree of the space used to describe the deformation + // of the embedded domain + unsigned int embedded_configuration_finite_element_degree = 1; + + // Order of the quadrature formula used to integrate the coupling + unsigned int coupling_quadrature_order = 3; + + // If set to true, then the embedded configuration function is + // interpreted as a displacement function + bool use_displacement = false; + + // Level of verbosity to use in the output + unsigned int verbosity_level = 10; + + // A flag to keep track if we were initialized or not + bool initialized = false; + }; + + DistributedLagrangeProblem(const DistributedLagrangeProblemParameters ¶meters); + + // Entry point for the DistributedLagrangeProblem + void run(); + + private: + // Object containing the actual parameters + const DistributedLagrangeProblemParameters ¶meters; + + // The following functions are similar to all other tutorial programs, with + // the exception that we now need to setup things for two different + // families of objects, namely the ones related to the *embedding* grids, and the + // ones related to the *embedded* one. + + void setup_grids_and_dofs(); + + void setup_embedding_dofs(); + + void setup_embedded_dofs(); + + // The only unconventional function we have here is the `setup_coupling()` method, + // used to generate the sparsity patter for the coupling matrix $C$. + + void setup_coupling(); + + void assemble_system(); + + void solve(); + + void output_results(); + + + // First we gather all the objects related to the embedding space geometry + + std::unique_ptr > space_grid; + std::unique_ptr > space_grid_tools_cache; + std::unique_ptr > space_fe; + std::unique_ptr > space_dh; + + // Then the ones related to the embedded grid, with the DoFHandler associated + // to the Lagrange multiplier `lambda` + + std::unique_ptr > embedded_grid; + std::unique_ptr > embedded_fe; + std::unique_ptr > embedded_dh; + + // And finally, everything that is needed to *deform* the embedded + // triangulation + std::unique_ptr > embedded_configuration_fe; + std::unique_ptr > embedded_configuration_dh; + Vector embedded_configuration; + + // The ParameterAcceptorProxy class, is a "transparent" wrapper, derived + // from both ParameterAcceptor and the type passed as its template + // parameter. At construction, the arguments are split into two parts: the + // first argument is an std::string, forwarded to the ParameterAcceptor + // class, and containing the name of the section that should be used for + // this class, while all the remaining arguments are forwarded to the + // constructor of the templated type, in this case, to the + // Functions::ParsedFunction constructor. + // + // This class allows you to use existing classes in conjunction with the + // ParameterAcceptor registration mechanism, provided that those classes + // have the members `declare_parameters()` and `parse_parameters()`. 
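  // As an illustration, a hypothetical user class (not part of this program)
  // exposing these two members could be wrapped like this; both the class
  // `MyClass` and the section name "My Class" are made up for the example:
  //
  // @code
  // class MyClass
  // {
  // public:
  //   static void declare_parameters(ParameterHandler &prm);
  //   void parse_parameters(ParameterHandler &prm);
  // };
  //
  // // ... used as a member of our problem class:
  // ParameterAcceptorProxy<MyClass> my_object("My Class");
  // @endcode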
+ // + // This is the case here, making it fairly easy to exploit the + // Functions::ParsedFunction class: instead of requiring the user to create + // new Function objects in its code for the RHS, boundary functions, etc., + // (like it is done in most of the other tutorials), here we allow the user + // to use deal.II interface to muParser (http://muparser.beltoforion.de), + // where the specification of the function is not done at compile time, but + // at run time, using a string that is parsed into an actual Function object. + // + // In this case, the `embedded_configuration_function` is a vector valued + // Function that can be interpreted as either a *deformation* or a + // *displacement* according to the boolean value of + // parameters.use_displacement. The number of components is specified later + // on in the construction. + + ParameterAcceptorProxy > + embedded_configuration_function; + + + // The embedded mapping. Notice that the order in which we construct these + // unique pointers is important. They will be destroied in the reversed + // order, so it is important that we respect the dependency tree. In + // particular, the embedded mapping will depend on both the `embedded_dh` + // and the `embedded_configuration`. If we declare it after the above two, + // we are fine, otherwise we would have do release this pointer manually in the + // destructor, or we'd get an error like + // + // @code + // -------------------------------------------------------- + // An error occurred in line <104> of file <../source/base/subscriptor.cc> in function + // void dealii::Subscriptor::check_no_subscribers() const + // The violated condition was: + // counter == 0 + // Additional information: + // (none) + // @endcode + // + // at the end of the program. + + std::unique_ptr > embedded_mapping; + + // We do the same thing to specify the value of the function $g$, + // which is what we want our solution to be in the embedded space. + // In this case the Function is a scalar one. + + ParameterAcceptorProxy > + embedded_value_function; + + // Similarly to what we have done with the Functions::ParsedFunction class, + // we repeat the same for the ReductionControl class, allowing us to + // specify all possible stopping criterions for the Schur complement + // iterative solver we'll use later on. + + ParameterAcceptorProxy schur_solver_control; + + // Next we gather all SparsityPattern, SparseMatrix, and Vector objects + // we'll need + + SparsityPattern stiffness_sparsity; + SparsityPattern coupling_sparsity; + SparsityPattern embedded_sparsity; + + SparseMatrix stiffness_matrix; + SparseMatrix coupling_matrix; + SparseMatrix embedded_stiffness_matrix; + + ConstraintMatrix constraints; + + Vector solution; + Vector rhs; + + Vector lambda; + Vector embedded_rhs; + Vector embedded_value; + + // The TimerOuput class is used to provide some statistics on + // the performance of our program. + TimerOutput monitor; + }; + + // At construction time, we initialize also the ParameterAcceptor class, with + // the section name we want our problem to use when parsing the parameter + // file. + // + // Parameter files can be organized into section/subsection/etc. : + // this has the advantage that defined objects share parameters when + // sharing the same section/subsection/etc. ParameterAcceptor allows + // to specify the section name using unix conventions on paths. 
+ // If the section name starts with a slash ("/"), then the section is + // interpreted as an *absolute path*, ParameterAcceptor enters a subsection + // for each directory in the path, using the last name it encountered as + // the landing subsection for the current class. + // + // For example, if you construct your class using + // ParameterAcceptor("/first/second/third/My Class"), the parameters will be + // organized as follows: + // + // @code + // subsection first + // subsection second + // subsection third + // subsection My Class + // ... # all the parameters + // end + // end + // end + // end + // @endcode + // + // Internally, the *current path* stored in ParameterAcceptor, is now + // considered to be "/first/second/third/", i.e. when you specify an + // absolute path, ParameterAcceptor *changes* the current section to the + // current path, i.e. to the path of the section name until the *last* "/". + // + // You can now construct another class derived from ParameterAcceptor using a + // relative path (e.g., ParameterAcceptor("My Other Class")) instead of the + // absolute one (e.g. ParameterAcceptor("/first/second/third/My Other Class")), + // obtaining: + // @code + // subsection first + // subsection second + // subsection third + // subsection MyClass + // ... # all the parameters + // end + // subsection My Other Class + // ... # all the parameters of MyOtherClass + // end + // end + // end + // end + // @endcode + // + // If the section name *ends* with a slash then subsequent classes will + // interpret this as a full path: for example, similar to the one above, if + // we have two classes, one initialized with + // ParameterAcceptor("/first/second/third/My Class/") + // and the other with ParameterAcceptor("My Other Class"), then the + // resulting parameter file will look like: + // + // @code + // subsection first + // subsection second + // subsection third + // subsection MyClass + // ... # all the parameters + // ... # notice MyClass subsection does not end here + // subsection My Other Class + // ... # all the parameters of MyOtherClass + // end + // end # of subsection MyClass + // end + // end + // end + // @endcode + // + // We are going to exploit this, by making our + // DistributedLagrangeProblemParameters + // the *parent* of all subsequently constructed classes. Since most of the other + // classes are members of DistributedLagrangeProblem this allows, for example, + // to construct two DistributedLagrangeProblem for two different dimensions, without + // having conflicts in the parameters for the two problems. 
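  // To make this concrete: if a program constructed two problem objects, say a
  // DistributedLagrangeProblem<1,2> and a DistributedLagrangeProblem<2,3>
  // (a purely hypothetical scenario here), the resulting parameter file would
  // be organized roughly as in the following sketch, where the entry and
  // subsection names are the ones declared below:
  //
  // @code
  // subsection Distributed Lagrange<1,2>
  //   set Initial embedding space refinement = 4
  //   ... # all other parameters of the <1,2> problem
  //   subsection Embedded configuration
  //     ... # parameters of the parsed configuration function
  //   end
  // end
  // subsection Distributed Lagrange<2,3>
  //   ... # all parameters of the <2,3> problem, in their own section
  // end
  // @endcode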
+ + template + DistributedLagrangeProblem::DistributedLagrangeProblemParameters:: + DistributedLagrangeProblemParameters() : + ParameterAcceptor("/Distributed Lagrange<" + Utilities::int_to_string(dim) + + "," + Utilities::int_to_string(spacedim) +">/") + { + + // The ParameterAcceptor::add_parameter does a few things: + // + // - enters the subsection specified at construction time to ParameterAcceptor + // + // - calls the ParameterAcceptor::prm.add_parameter + // + // - calls any signal you may have attached to + // ParameterAcceptor::declare_parameters_call_back + // + // - leaves the subsection + // + // In turns, ParameterAcceptor::prm.add_parameter + // + // - declares an entry in the parameter handler for the given variable; + // + // - reads the value of the variable, + // + // - transforms it to a string, used as the default value for the parameter + // file + // + // - attaches an *action* to ParameterAcceptor::prm that monitors when a file + // is parsed, or when an entry is set, and when this happens, it updates the + // content of the given variable to the value parsed by the string + + add_parameter("Initial embedding space refinement", + initial_refinement); + + add_parameter("Initial embedded space refinement", + initial_embedded_refinement); + + add_parameter("Local refinements steps near embedded domain", + delta_refinement); + + add_parameter("Homogeneous Dirichlet boundary ids", + homogeneous_dirichlet_ids); + + add_parameter("Use displacement in embedded interface", + use_displacement); + + add_parameter("Embedding space finite element degree", + embedding_space_finite_element_degree); + + add_parameter("Embedded space finite element degree", + embedded_space_finite_element_degree); + + add_parameter("Embedded configuration finite element degree", + embedded_configuration_finite_element_degree); + + add_parameter("Coupling quadrature order", + coupling_quadrature_order); + + add_parameter("Verbosity level", + verbosity_level); + + // If we get parsed, then the parameters are good to go. Set the internal + // variable `initialized` to true. + parse_parameters_call_back.connect( + [&]() -> void {initialized = true;} + ); + } + + // The constructor is pretty standard, with the exception of the `ParameterAcceptorProxy` + // objects, as explained earlier on. + + template + DistributedLagrangeProblem::DistributedLagrangeProblem( + const DistributedLagrangeProblemParameters ¶meters) : + parameters(parameters), + embedded_configuration_function("Embedded configuration", spacedim), + embedded_value_function("Embedded value"), + schur_solver_control("Schur solver control"), + monitor(std::cout, + TimerOutput::summary, + TimerOutput::cpu_and_wall_times) + { + // Here is a way to set default values for a ParameterAcceptor class + // that was constructed using ParameterAcceptorProxy. + // + // In this case, we set the default deformation of the embedded grid to be + // a circle with radius `R` and center (Cx, Cy), we set the default value + // for the embedded_value_function to be the constant one, and specify some + // sensible values for the SolverControl object. + // + // It is fundamental for $\Gamma% to be embedded: from the definition of + // $C_{\alpha j}$ is clear that, if $\Gamma \not\subseteq \Omega$, certain + // rows of the matrix $C$ shall be zero. This would be a problem, as the Schur + // complement method requires $C$ needs to have full column rank. 
+ + embedded_configuration_function.declare_parameters_call_back.connect( + [] () -> void + { + ParameterAcceptor::prm.set("Function constants", "R=.3, Cx=.4, Cy=.4"); + ParameterAcceptor::prm.set("Function expression", "R*cos(2*pi*x)+Cx; R*sin(2*pi*x)+Cy"); + }); + + embedded_value_function.declare_parameters_call_back.connect( + [] () -> void + { + ParameterAcceptor::prm.set("Function expression", "1"); + }); + + schur_solver_control.declare_parameters_call_back.connect( + [] () -> void + { + ParameterAcceptor::prm.set("Max steps", "1000"); + ParameterAcceptor::prm.set("Reduction", "1.e-12"); + ParameterAcceptor::prm.set("Tolerance", "1.e-12"); + }); + + } + + // Nothing special here, except that we check if parsing was done before + // we actually attempt to run our program. + + template + void DistributedLagrangeProblem::run() + { + AssertThrow(parameters.initialized, + ExcNotInitialized()); + deallog.depth_console(parameters.verbosity_level); + + setup_grids_and_dofs(); + setup_coupling(); + assemble_system(); + solve(); + output_results(); + } + + + // The function DistributedLagrangeProblem::setup_grids_and_dofs + // is used to set up the finite element spaces. Notice how + // @code std_cxx14::make_unique @endcode is used to create objects + // wrapped inside unique pointers + + template + void DistributedLagrangeProblem::setup_grids_and_dofs() + { + TimerOutput::Scope timer_section(monitor, "Setup grids and dofs"); + + // Initializing $\Omega$: + // constructing the Triangulation and wrapping it into a unique_ptr + space_grid = std_cxx14::make_unique >(); + GridGenerator::hyper_cube(*space_grid); + // Requesting the varius values to the parameters object, which is + // of type DistributedLagrangeProblemParameters + space_grid->refine_global(parameters.initial_refinement); + space_grid_tools_cache = + std_cxx14::make_unique >(*space_grid); + // The refinement of $\Omega$ depends on $\Gamma$: this means we need to + // set up $\Gamma$ before we can finish with $\Omega$ + embedded_grid = std_cxx14::make_unique >(); + GridGenerator::hyper_cube(*embedded_grid); + embedded_grid->refine_global(parameters.initial_embedded_refinement); + + // At this point we need to configure the deformation of the embedded grid + embedded_configuration_fe = + std_cxx14::make_unique > + (FE_Q(parameters.embedded_configuration_finite_element_degree), spacedim); + + embedded_configuration_dh = + std_cxx14::make_unique >(*embedded_grid); + + embedded_configuration_dh->distribute_dofs(*embedded_configuration_fe); + embedded_configuration.reinit(embedded_configuration_dh->n_dofs()); + + // Interpolating the embedded configuration function + VectorTools::interpolate(*embedded_configuration_dh, + embedded_configuration_function, + embedded_configuration); + + // Once we have the embedded configuration vector stored, we can interpret + // it according to what the user has specified in the parameter file, as a + // displacement, in which case we construct a mapping that *displaces* the + // position of each support point of our configuration finite element space + // by the specified amount on the corresponding configuration vector, or as + // an absolution position. + // + // In the first case, the class MappingQEulerian offers its services, while + // in the second one, we'll use the class MappingFEField. They are in fact + // very similar. 
MappingQEulerian will only work for systems of FE_Q finite + // element spaces, where the displacment vector is stored in the first + // `spacedim` components of the FESystem, and the degree given as a + // parameter at construction time, must match the degree of the first + // `spacedim` components. + // + // The class MappingFEField is slightly more general, in that it allows you + // to select arbitrary FiniteElement types when constructing your + // approximation. Naturally some choices may (or may not) make sense, + // according to the type of FiniteElement you choose. MappingFEField + // implements the pure iso-parametric concept, and can be used, for example, + // to implement iso-geometric analysis codes in deal.II, by combining it + // with the FEBernstein finite element class. In this example, we'll use the + // two interchangeably, by taking into account the fact that one + // configuration will be a `displacment`, while the other will be an + // absolute `deformation` field. + + if (parameters.use_displacement == true) + embedded_mapping = + std_cxx14::make_unique, spacedim> > + (parameters.embedded_configuration_finite_element_degree, + *embedded_configuration_dh, embedded_configuration); + else + embedded_mapping = + std_cxx14::make_unique, DoFHandler > > + (*embedded_configuration_dh, + embedded_configuration); + + // Estimating the diameter of the largest active cell of $\Gamma$, and + // the smalles one of $\Omega$ + double embedded_space_maximal_diameter = + GridTools::maximal_cell_diameter(*embedded_grid, *embedded_mapping); + double embedding_space_minimal_diameter = + GridTools::minimal_cell_diameter(*space_grid); + // Setting up the $\Gamma$'s DoFs + setup_embedded_dofs(); + + // With the mapping in place, it is now possible to query what is the + // location of all support points associated with the `embedded_dh`, by + // calling the method DoFTools::map_dofs_to_support_points. + // + // This method has two variants. One that does *not* take a Mapping, and + // one that takes a Mapping. If you use the second type, like we are doing + // in this case, the support points are computed through the specified mapping, + // which can manipulate them accordingly. + // + // This is precisely what the `embedded_mapping` is there for. + + std::vector > support_points(embedded_dh->n_dofs()); + if (parameters.delta_refinement != 0) + DoFTools::map_dofs_to_support_points(*embedded_mapping, + *embedded_dh, + support_points); + + // Once we have the support points of the embedded finite element space, we + // would like to identify what cells of the embedding space contain what + // support point, to get a chance at refining the embedding grid where it is + // necessary, i.e., where the embedded grid is. This can be done manually, + // by looping over each point, and then calling the method + // Mapping::tranform_real_to_unit_cell for each cell of the embedding + // space, until we find one that returns points in the unit reference cell, + // or it can be done in a more intelligent way. + // + // The GridTools::find_active_cell_around_point is a possible option, that + // performs the above task in a cheaper way, by first identifying the + // closest vertex of the embedding Triangulation to the target point, and + // then by calling Mapping::tranform_real_to_unit_cell only for those cells + // that share the found vertex. 
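  // For illustration only, the "manual" search described above would look
  // roughly like the following sketch. This is not what the program does;
  // `point` stands for one of the support points computed above, and error
  // handling is reduced to skipping cells for which the inverse mapping fails:
  //
  // @code
  // for (const auto &cell : space_dh->active_cell_iterators())
  //   try
  //     {
  //       const Point<spacedim> p_unit =
  //         StaticMappingQ1<spacedim>::mapping.transform_real_to_unit_cell(cell, point);
  //       if (GeometryInfo<spacedim>::is_inside_unit_cell(p_unit))
  //         {
  //           // `cell` contains `point`: mark it for refinement and stop searching.
  //           cell->set_refine_flag();
  //           break;
  //         }
  //     }
  //   catch (const typename Mapping<spacedim>::ExcTransformationFailed &)
  //     {
  //       // `point` is far away from `cell`: just try the next cell.
  //     }
  // @endcode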
+ // + // In fact, there are algorithm in the GridTools namespace that exploit a + // GridTools::Cache object, and possibly a KDTree object to speed up these + // operations as much as possible. + // + // The simplest way to exploit the maximum speed, is by calling a + // specialized method, GridTools::compute_point_locations, that will store a + // lot of useful information and data structures during the first point + // search, and then reuse all of this for subsequent points. + // + // GridTools::compute_point_locations returns a tuple where the first + // element is a vector of cells containing the input points, in this + // case support_points. For refinement, this is the only information we need, + // and this is exactly what happens now. + // + // When we need to assemble a coupling matrix, however, we'll also need the + // reference location of each point to evaluate the basis functions of the + // embedding space. The other elements of the tuple returned by + // GridTools::compute_point_locations allows you to reconstruct, for each + // point, what cell contains it, and what is the location in the reference + // cell of the given point. Since this information is better grouped into + // cells, then this is what the algorithm returns: a tuple, containing a + // vector of all cells that have at least one point in them, together with a + // list of all reference points and their corresponding index in the + // original vector. + // + // In the following loop, we will be ignoring all returned objects except + // the first, identifying all cells contain at least one support point of + // the embedded space. + // + // Notice that we need to do some sanity checks, in the sense that we want + // to have an embedding grid which is well refined around the embedded grid, + // but where two consecutive support points lie either in the same cell, or + // in neighbor embedding cells. + // + // This is only possible if we ensure that the smallest cell size of the + // embedding grid is nonetheless bigger than the largest cell size of the + // embedded grid. Since users can modify both levels of refinements, as well + // as the amount of local refinement they want around the embedded grid, we + // make sure that the resulting meshes satisfy our requirements, and if this + // is not the case, we bail out with an exception. + + for (unsigned int i=0; i(point_locations); + for (auto cell : cells) + cell->set_refine_flag(); + space_grid->execute_coarsening_and_refinement(); + embedding_space_minimal_diameter = GridTools::minimal_cell_diameter(*space_grid); + AssertThrow(embedded_space_maximal_diameter < embedding_space_minimal_diameter, + ExcMessage("The embedding grid is too refined (or the embedded grid" + "is too coarse). 
Adjust the parameters so that the minimal" + "grid size of the embedding grid is larger " + "than the maximal grid size of the embedded grid.")) + } + + deallog << "Embedding minimal diameter: " << embedding_space_minimal_diameter + << ", embedded maximal diameter: " << embedded_space_maximal_diameter + << ", ratio: " + << embedded_space_maximal_diameter/embedding_space_minimal_diameter << std::endl; + + // $\Omega$ has been refined and we can now set up its DoFs + setup_embedding_dofs(); + } + + // We now set up the DoFs of $\Omega$ and $\Gamma$: since they are fundamentally + // independent (except for the fact that $\Omega$'s mesh is more refined "around" + // $\Gamma$) the procedure is standard + template + void DistributedLagrangeProblem::setup_embedding_dofs() + { + space_dh = std_cxx14::make_unique >(*space_grid); + space_fe = std_cxx14::make_unique > + (parameters.embedding_space_finite_element_degree); + space_dh->distribute_dofs(*space_fe); + + DoFTools::make_hanging_node_constraints(*space_dh, constraints); + for (auto id:parameters.homogeneous_dirichlet_ids) + { + VectorTools::interpolate_boundary_values(*space_dh, id, + Functions::ZeroFunction(), + constraints); + } + constraints.close(); + + // By definition the stiffness matrix involves only $\Omega$'s DoFs + DynamicSparsityPattern dsp(space_dh->n_dofs(), space_dh->n_dofs()); + DoFTools::make_sparsity_pattern(*space_dh, dsp, constraints); + stiffness_sparsity.copy_from(dsp); + stiffness_matrix.reinit(stiffness_sparsity); + solution.reinit(space_dh->n_dofs()); + rhs.reinit(space_dh->n_dofs()); + + deallog << "Embedding dofs: " << space_dh->n_dofs() << std::endl; + } + + template + void DistributedLagrangeProblem::setup_embedded_dofs() + { + embedded_dh = std_cxx14::make_unique >(*embedded_grid); + embedded_fe = std_cxx14::make_unique > + (parameters.embedded_space_finite_element_degree); + embedded_dh->distribute_dofs(*embedded_fe); + + DynamicSparsityPattern dsp(embedded_dh->n_dofs(), embedded_dh->n_dofs()); + DoFTools::make_sparsity_pattern(*embedded_dh, dsp); + embedded_sparsity.copy_from(dsp); + embedded_stiffness_matrix.reinit(embedded_sparsity); + // By definition the rhs of the system we're solving involves only a zero + // vector and $G$, which is computed using only $\Gamma$'s DoFs + lambda.reinit(embedded_dh->n_dofs()); + embedded_rhs.reinit(embedded_dh->n_dofs()); + embedded_value.reinit(embedded_dh->n_dofs()); + + deallog << "Embedded dofs: " << embedded_dh->n_dofs() << std::endl; + } + + // Creating the coupling sparsity pattern is a complex operation, + // but it can be easily done using the + // NonMatching::create_coupling_sparsity_pattern, which requires the + // two DoFHandlers, the quadrature points for the coupling, + // a DynamicSparsityPattern (which then needs to be copied into the + // sparsity one, as usual), the component mask for the embedding and + // embedded Triangulation (which we leave empty) and the mappings + // for both the embedding and the embedded Triangulation. 
+ template + void DistributedLagrangeProblem::setup_coupling() + { + TimerOutput::Scope timer_section(monitor, "Setup coupling"); + + QGauss quad(parameters.coupling_quadrature_order); + + DynamicSparsityPattern dsp(space_dh->n_dofs(), embedded_dh->n_dofs()); + + NonMatching::create_coupling_sparsity_pattern(*space_dh, + *embedded_dh, + quad, + dsp, ComponentMask(), ComponentMask(), + StaticMappingQ1::mapping, + *embedded_mapping); + coupling_sparsity.copy_from(dsp); + coupling_matrix.reinit(coupling_sparsity); + } + + // This function creates the matrices: as noted before computing + // the stiffness matrix and the rhs is a standard procedure + template + void DistributedLagrangeProblem::assemble_system() + { + { + TimerOutput::Scope timer_section(monitor, "Assemble system"); + + // Embedding stiffness matrix $K$ + MatrixTools::create_laplace_matrix(*space_dh, QGauss(2*space_fe->degree+1), + stiffness_matrix, (const Function *) nullptr, constraints); + + // Embedded stiffness matrix and rhs vector $G$ + MatrixTools::create_laplace_matrix(*embedded_mapping, + *embedded_dh, + QGauss(2*embedded_fe->degree+1), + embedded_stiffness_matrix, + embedded_value_function, + embedded_rhs); + } + { + TimerOutput::Scope timer_section(monitor, "Assemble coupling system"); + + // To compute the coupling matrix we use the NonMatching::create_coupling_mass_matrix + // tool, which works similarly to NonMatching::create_coupling_sparsity_pattern, + // requiring only an additional parameter: a constraint matrix + QGauss quad(parameters.coupling_quadrature_order); + NonMatching::create_coupling_mass_matrix(*space_dh, + *embedded_dh, + quad, + coupling_matrix, ConstraintMatrix(), + ComponentMask(), ComponentMask(), + StaticMappingQ1::mapping, + *embedded_mapping); + + VectorTools::interpolate(*embedded_dh, embedded_value_function, embedded_value); + } + } + + // All parts have been assembled: we solve the system + // using the Schur complement method + template + void DistributedLagrangeProblem::solve() + { + TimerOutput::Scope timer_section(monitor, "Solve system"); + + // Start by creating the inverse stiffness matrix + SparseDirectUMFPACK K_inv_umfpack; + K_inv_umfpack.initialize(stiffness_matrix); + + // Same thing, for the embedded space + SparseDirectUMFPACK A_inv_umfpack; + A_inv_umfpack.initialize(embedded_stiffness_matrix); + // Initializing the operators, as described in the introduction + auto K = linear_operator(stiffness_matrix); + auto A = linear_operator(embedded_stiffness_matrix); + auto Ct = linear_operator(coupling_matrix); + auto C = transpose_operator(Ct); + + auto K_inv = linear_operator(K, K_inv_umfpack); + auto A_inv = linear_operator(A, A_inv_umfpack); + + auto S = C*K_inv*Ct; + // Using the Schur complement method + SolverCG > solver_cg(schur_solver_control); + auto S_inv = inverse_operator(S, solver_cg, A_inv); + + lambda = S_inv * embedded_rhs; + + solution = K_inv * Ct * lambda; + + constraints.distribute(solution); + } + + // Standard result output on two separate files, one + // for each mesh + template + void DistributedLagrangeProblem::output_results() + { + TimerOutput::Scope timer_section(monitor, "Output results"); + + DataOut embedding_out; + + std::ofstream embedding_out_file("embedding.vtu"); + + embedding_out.attach_dof_handler(*space_dh); + embedding_out.add_data_vector(solution, "solution"); + embedding_out.build_patches(parameters.embedding_space_finite_element_degree); + embedding_out.write_vtu(embedding_out_file); + + // The only difference between the two output 
routines, is that in the + // second case, we want to output the data on the current configuration, and + // not on the reference one. This is possible by passing the actual + // embedded_mapping to the DataOut::build_patches function. The mapping will + // take care of outputting the result on the actual deformed configuration. + + DataOut > embedded_out; + + std::ofstream embedded_out_file("embedded.vtu"); + + embedded_out.attach_dof_handler(*embedded_dh); + embedded_out.add_data_vector(lambda, "lambda"); + embedded_out.add_data_vector(embedded_value, "g"); + embedded_out.build_patches(*embedded_mapping, + parameters.embedded_space_finite_element_degree); + embedded_out.write_vtu(embedded_out_file); + } +} + + + +int main() +{ + try + { + using namespace dealii; + using namespace Step60; + + const unsigned int dim=1, spacedim=2; + + // Differently to what happens in other tutorial programs, here we the the + // ParameterAcceptor style of initialization, i.e., all objects are first + // constructed, and then a single call to the static method + // ParameterAcceptor::initialize is issued to fill all parameters of the + // classes that are derived from ParameterAcceptor. + + DistributedLagrangeProblem::DistributedLagrangeProblemParameters parameters; + DistributedLagrangeProblem problem(parameters); + ParameterAcceptor::initialize("parameters.prm", "used_parameters.prm"); + problem.run(); + } + catch (std::exception &exc) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + catch (...) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Unknown exception!" << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + return 0; +}