From: frohne Date: Fri, 14 Oct 2011 19:37:41 +0000 (+0000) Subject: first version of step-41, does not compile yet X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ea35f0ef9a2d5aac9119bb7771a2b2b15dc0225e;p=dealii-svn.git first version of step-41, does not compile yet git-svn-id: https://svn.dealii.org/trunk@24605 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/examples/step-41/Makefile b/deal.II/examples/step-41/Makefile new file mode 100644 index 0000000000..031d9128ce --- /dev/null +++ b/deal.II/examples/step-41/Makefile @@ -0,0 +1,143 @@ +# $Id: Makefile 24349 2011-09-21 08:38:41Z kronbichler $ + + +# For the small projects Makefile, you basically need to fill in only +# four fields. +# +# The first is the name of the application. It is assumed that the +# application name is the same as the base file name of the single C++ +# file from which the application is generated. +target = step-41 + +# The second field determines whether you want to run your program in +# debug or optimized mode. The latter is significantly faster, but no +# run-time checking of parameters and internal states is performed, so +# you should set this value to `on' while you develop your program, +# and to `off' when running production computations. +debug-mode = on + + +# As third field, we need to give the path to the top-level deal.II +# directory. You need to adjust this to your needs. Since this path is +# probably the most often needed one in the Makefile internals, it is +# designated by a single-character variable, since that can be +# reference using $D only, i.e. without the parentheses that are +# required for most other parameters, as e.g. in $(target). +D = ../deal.II/ + + +# The last field specifies the names of data and other files that +# shall be deleted when calling `make clean'. Object and backup files, +# executables and the like are removed anyway. Here, we give a list of +# files in the various output formats that deal.II supports. +clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk *ucd *.d2 + + + + +# +# +# Usually, you will not need to change anything beyond this point. +# +# +# The next statement tells the `make' program where to find the +# deal.II top level directory and to include the file with the global +# settings +include $D/common/Make.global_options + + +# Since the whole project consists of only one file, we need not +# consider difficult dependencies. We only have to declare the +# libraries which we want to link to the object file. deal.II has two +# libraries: one for the debug mode version of the +# application and one for optimized mode. +libs.g := $(lib-deal2.g) +libs.o := $(lib-deal2.o) + + +# We now use the variable defined above to switch between debug and +# optimized mode to select the set of libraries to link with. Included +# in the list of libraries is the name of the object file which we +# will produce from the single C++ file. Note that by default we use +# the extension .g.o for object files compiled in debug mode and .o for +# object files in optimized mode (or whatever local default on your +# system is instead of .o) +ifeq ($(debug-mode),on) + libraries = $(target).g.$(OBJEXT) $(libs.g) +else + libraries = $(target).$(OBJEXT) $(libs.o) +endif + + +# Now comes the first production rule: how to link the single object +# file produced from the single C++ file into the executable. Since +# this is the first rule in the Makefile, it is the one `make' selects +# if you call it without arguments. 
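+# (Purely as an illustration of what this rule does -- the exact
+# command depends on your compiler and installation, so the file and
+# library names below are made up: with debug-mode = on, the link
+# step will expand to something along the lines of
+#
+#   g++ -o step-41 step-41.g.o /path/to/deal.II/lib/libdeal_II.g.so ...
+#
+# with the compiler, library locations and additional flags all taken
+# from common/Make.global_options.)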
+$(target)$(EXEEXT) : $(libraries) + @echo ============================ Linking $@ + @$(CXX) -o $@ $^ $(LIBS) $(LDFLAGS) + + +# To make running the application somewhat independent of the actual +# program name, we usually declare a rule `run' which simply runs the +# program. You can then run it by typing `make run'. This is also +# useful if you want to call the executable with arguments which do +# not change frequently. You may then want to add them to the +# following rule: +run: $(target)$(EXEEXT) + @echo ============================ Running $< + @./$(target)$(EXEEXT) + + +# As a last rule to the `make' program, we define what to do when +# cleaning up a directory. This usually involves deleting object files +# and other automatically created files such as the executable itself, +# backup files, and data files. Since the latter are not usually quite +# diverse, you needed to declare them at the top of this file. +clean: + -rm -f *.$(OBJEXT) *~ Makefile.dep $(target)$(EXEEXT) $(clean-up-files) + + +# Since we have not yet stated how to make an object file from a C++ +# file, we should do so now. Since the many flags passed to the +# compiler are usually not of much interest, we suppress the actual +# command line using the `at' sign in the first column of the rules +# and write the string indicating what we do instead. +./%.g.$(OBJEXT) : + @echo "==============debug========= $( $@" + @$(CXX) $(CXXFLAGS.g) -c $< -o $@ +./%.$(OBJEXT) : + @echo "==============optimized===== $( $@" + @$(CXX) $(CXXFLAGS.o) -c $< -o $@ + + +# The following statement tells make that the rules `run' and `clean' +# are not expected to produce files of the same name as Makefile rules +# usually do. +.PHONY: run clean + + +# Finally there is a rule which you normally need not care much about: +# since the executable depends on some include files from the library, +# besides the C++ application file of course, it is necessary to +# re-generate the executable when one of the files it depends on has +# changed. The following rule creates a dependency file +# `Makefile.dep', which `make' uses to determine when to regenerate +# the executable. This file is automagically remade whenever needed, +# i.e. whenever one of the cc-/h-files changed. Make detects whether +# to remake this file upon inclusion at the bottom of this file. +# +# If the creation of Makefile.dep fails, blow it away and fail +Makefile.dep: $(target).cc Makefile \ + $(shell echo $D/include/deal.II/*/*.h) + @echo ============================ Remaking $@ + @$D/common/scripts/make_dependencies $(INCLUDE) -B. $(target).cc \ + > $@ \ + || (rm -f $@ ; false) + @if test -s $@ ; then : else rm $@ ; fi + +# To make the dependencies known to `make', we finally have to include +# them: +include Makefile.dep + + diff --git a/deal.II/examples/step-41/step-41.cc b/deal.II/examples/step-41/step-41.cc new file mode 100644 index 0000000000..9d32583279 --- /dev/null +++ b/deal.II/examples/step-41/step-41.cc @@ -0,0 +1,916 @@ +/* $Id: step-4.cc 24093 2011-08-16 13:58:12Z bangerth $ */ +/* Author: Wolfgang Bangerth, University of Heidelberg, 1999 */ + +/* $Id: step-4.cc 24093 2011-08-16 13:58:12Z bangerth $ */ +/* */ +/* Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 by the deal.II authors */ +/* */ +/* This file is subject to QPL and may not be distributed */ +/* without copyright and license information. Please refer */ +/* to the file deal.II/doc/license.html for the text and */ +/* further information on this license. 
*/ + + // @sect3{Include files} + + // The first few (many?) include + // files have already been used in + // the previous example, so we will + // not explain their meaning here + // again. +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + // This is new, however: in the previous + // example we got some unwanted output from + // the linear solvers. If we want to suppress + // it, we have to include this file and add a + // single line somewhere to the program (see + // the main() function below for that): +#include + + // The final step, as in previous + // programs, is to import all the + // deal.II class and function names + // into the global namespace: +using namespace dealii; + + // @sect3{The Step4 class template} + + // This is again the same + // Step4 class as in the + // previous example. The only + // difference is that we have now + // declared it as a class with a + // template parameter, and the + // template parameter is of course + // the spatial dimension in which we + // would like to solve the Laplace + // equation. Of course, several of + // the member variables depend on + // this dimension as well, in + // particular the Triangulation + // class, which has to represent + // quadrilaterals or hexahedra, + // respectively. Apart from this, + // everything is as before. +template +class Step4 +{ + public: + Step4 (); + void run (); + + private: + void make_grid (); + void setup_system(); + void assemble_system (); + void projection_active_set (); + void solve (); + void output_results (Vector vector_to_plot, const std::string& title) const; + + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; + + ConstraintMatrix constraints; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + SparseMatrix system_matrix_complete; + + Vector solution; + Vector system_rhs; + Vector system_rhs_complete; + Vector resid_vector; + Vector active_set; + + std::map boundary_values; +}; + + + // @sect3{Right hand side and boundary values} + + // In the following, we declare two more + // classes denoting the right hand side and + // the non-homogeneous Dirichlet boundary + // values. Both are functions of a + // dim-dimensional space variable, so we + // declare them as templates as well. + // + // Each of these classes is derived from a + // common, abstract base class Function, + // which declares the common interface which + // all functions have to follow. In + // particular, concrete classes have to + // overload the value function, + // which takes a point in dim-dimensional + // space as parameters and shall return the + // value at that point as a + // double variable. + // + // The value function takes a + // second argument, which we have here named + // component: This is only meant + // for vector valued functions, where you may + // want to access a certain component of the + // vector at the point + // p. However, our functions are + // scalar, so we need not worry about this + // parameter and we will not use it in the + // implementation of the functions. 
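+ // (A small commented illustration of this -- rhs_function and p are
+ // hypothetical names, not variables of this program: we will be able
+ // to write
+ //   const RightHandSide<dim> rhs_function;
+ //   const double f = rhs_function.value (p);
+ // i.e. call value() with the point alone, without naming a
+ // component.)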
Inside + // the library's header files, the Function + // base class's declaration of the + // value function has a default + // value of zero for the component, so we + // will access the value + // function of the right hand side with only + // one parameter, namely the point where we + // want to evaluate the function. A value for + // the component can then simply be omitted + // for scalar functions. + // + // Note that the C++ language forces + // us to declare and define a + // constructor to the following + // classes even though they are + // empty. This is due to the fact + // that the base class has no default + // constructor (i.e. one without + // arguments), even though it has a + // constructor which has default + // values for all arguments. +template +class RightHandSide : public Function +{ + public: + RightHandSide () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; +}; + + + +template +class BoundaryValues : public Function +{ + public: + BoundaryValues () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; +}; + +template +class Obstacle : public Function +{ + public: + Obstacle () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; +}; + + + + // For this example, we choose as right hand + // side function to function $4(x^4+y^4)$ in + // 2D, or $4(x^4+y^4+z^4)$ in 3D. We could + // write this distinction using an + // if-statement on the space dimension, but + // here is a simple way that also allows us + // to use the same function in 1D (or in 4D, + // if you should desire to do so), by using a + // short loop. Fortunately, the compiler + // knows the size of the loop at compile time + // (remember that at the time when you define + // the template, the compiler doesn't know + // the value of dim, but when it later + // encounters a statement or declaration + // RightHandSide@<2@>, it will take the + // template, replace all occurrences of dim + // by 2 and compile the resulting function); + // in other words, at the time of compiling + // this function, the number of times the + // body will be executed is known, and the + // compiler can optimize away the overhead + // needed for the loop and the result will be + // as fast as if we had used the formulas + // above right away. + // + // The last thing to note is that a + // Point@ denotes a point in + // dim-dimensionsal space, and its individual + // components (i.e. $x$, $y$, + // ... coordinates) can be accessed using the + // () operator (in fact, the [] operator will + // work just as well) with indices starting + // at zero as usual in C and C++. +template +double RightHandSide::value (const Point &p, + const unsigned int /*component*/) const +{ + double return_value = 0; + // for (unsigned int i=0; i +double BoundaryValues::value (const Point &p, + const unsigned int /*component*/) const +{ + double return_value = 0; + + return return_value; +} + +template +double Obstacle::value (const Point &p, + const unsigned int /*component*/) const +{ + return p.square() - 0.5; +} + + + + // @sect3{Implementation of the Step4 class} + + // Next for the implementation of the class + // template that makes use of the functions + // above. As before, we will write everything + // as templates that have a formal parameter + // dim that we assume unknown at + // the time we define the template + // functions. 
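+ // (For illustration, and with a hypothetical variable name -- the
+ // actual main() function appears at the end of this file -- such a
+ // declaration simply looks like
+ //   Step4<2> problem_2d;
+ //   problem_2d.run ();
+ // and it is this declaration that triggers the instantiation.)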
Only later, the compiler will + // find a declaration of + // Step4@<2@> (in the + // main function, actually) and + // compile the entire class with + // dim replaced by 2, a process + // referred to as `instantiation of a + // template'. When doing so, it will also + // replace instances of + // RightHandSide@ by + // RightHandSide@<2@> and + // instantiate the latter class from the + // class template. + // + // In fact, the compiler will also find a + // declaration + // Step4@<3@> in + // main(). This will cause it to + // again go back to the general + // Step4@ + // template, replace all occurrences of + // dim, this time by 3, and + // compile the class a second time. Note that + // the two instantiations + // Step4@<2@> and + // Step4@<3@> are + // completely independent classes; their only + // common feature is that they are both + // instantiated from the same general + // template, but they are not convertible + // into each other, for example, and share no + // code (both instantiations are compiled + // completely independently). + + + // @sect4{Step4::Step4} + + // After this introduction, here is the + // constructor of the Step4 + // class. It specifies the desired polynomial + // degree of the finite elements and + // associates the DoFHandler to the + // triangulation just as in the previous + // example program, step-3: +template +Step4::Step4 () + : + fe (1), + dof_handler (triangulation) +{} + + + // @sect4{Step4::make_grid} + + // Grid creation is something inherently + // dimension dependent. However, as long as + // the domains are sufficiently similar in 2D + // or 3D, the library can abstract for + // you. In our case, we would like to again + // solve on the square $[-1,1]\times [-1,1]$ + // in 2D, or on the cube $[-1,1] \times + // [-1,1] \times [-1,1]$ in 3D; both can be + // termed GridGenerator::hyper_cube(), so we may + // use the same function in whatever + // dimension we are. Of course, the functions + // that create a hypercube in two and three + // dimensions are very much different, but + // that is something you need not care + // about. Let the library handle the + // difficult things. +template +void Step4::make_grid () +{ + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (6); + + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; +} + + // @sect4{Step4::setup_system} + + // This function looks + // exactly like in the previous example, + // although it performs actions that in their + // details are quite different if + // dim happens to be 3. The only + // significant difference from a user's + // perspective is the number of cells + // resulting, which is much higher in three + // than in two space dimensions! 
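+ // (To attach numbers to this claim: make_grid() above refines the
+ // cube $[-1,1]^{dim}$ six times, and every refinement step replaces
+ // each cell by $2^{dim}$ children, so we end up with $2^{6\,dim}$
+ // cells -- 4,096 quadrilaterals in 2d, but 262,144 hexahedra in 3d.)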
+template +void Step4::setup_system () +{ + dof_handler.distribute_dofs (fe); + + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; + + CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, c_sparsity); + sparsity_pattern.copy_from(c_sparsity); + + system_matrix.reinit (sparsity_pattern); + system_matrix_complete.reinit (sparsity_pattern); + + solution.reinit (dof_handler.n_dofs()); + system_rhs.reinit (dof_handler.n_dofs()); + system_rhs_complete.reinit (dof_handler.n_dofs()); + resid_vector.reinit (dof_handler.n_dofs()); + active_set.reinit (dof_handler.n_dofs()); +} + + + // @sect4{Step4::assemble_system} + + // Unlike in the previous example, we + // would now like to use a + // non-constant right hand side + // function and non-zero boundary + // values. Both are tasks that are + // readily achieved with a only a few + // new lines of code in the + // assemblage of the matrix and right + // hand side. + // + // More interesting, though, is the + // way we assemble matrix and right + // hand side vector dimension + // independently: there is simply no + // difference to the + // two-dimensional case. Since the + // important objects used in this + // function (quadrature formula, + // FEValues) depend on the dimension + // by way of a template parameter as + // well, they can take care of + // setting up properly everything for + // the dimension for which this + // function is compiled. By declaring + // all classes which might depend on + // the dimension using a template + // parameter, the library can make + // nearly all work for you and you + // don't have to care about most + // things. +template +void Step4::assemble_system () +{ + QGauss quadrature_formula(2); + + // We wanted to have a non-constant right + // hand side, so we use an object of the + // class declared above to generate the + // necessary data. Since this right hand + // side object is only used locally in the + // present function, we declare it here as + // a local variable: + const RightHandSide right_hand_side; + + // Compared to the previous example, in + // order to evaluate the non-constant right + // hand side function we now also need the + // quadrature points on the cell we are + // presently on (previously, we only + // required values and gradients of the + // shape function from the + // FEValues object, as well as + // the quadrature weights, + // FEValues::JxW() ). We can tell the + // FEValues object to do for + // us by also giving it the + // #update_quadrature_points + // flag: + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + // We then again define a few + // abbreviations. The values of these + // variables of course depend on the + // dimension which we are presently + // using. However, the FE and Quadrature + // classes do all the necessary work for + // you and you don't have to care about the + // dimension dependent parts: + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + // Next, we again have to loop over all + // cells and assemble local contributions. + // Note, that a cell is a quadrilateral in + // two space dimensions, but a hexahedron + // in 3D. 
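+ // (As a commented reminder of what the nested loops further down
+ // will accumulate on every cell $K$:
+ //   $A^K_{ij} = \sum_q \nabla\varphi_i(x_q) \cdot \nabla\varphi_j(x_q)\; JxW(x_q)$,
+ //   $F^K_i    = \sum_q \varphi_i(x_q)\, f(x_q)\; JxW(x_q)$,
+ // i.e. the cell contributions to the Laplace matrix and to the right
+ // hand side, which are subsequently copied into the global objects.)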
In fact, the + // active_cell_iterator data + // type is something different, depending + // on the dimension we are in, but to the + // outside world they look alike and you + // will probably never see a difference + // although the classes that this typedef + // stands for are in fact completely + // unrelated: + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + cell_matrix = 0; + cell_rhs = 0; + + // Now we have to assemble the + // local matrix and right hand + // side. This is done exactly + // like in the previous + // example, but now we revert + // the order of the loops + // (which we can safely do + // since they are independent + // of each other) and merge the + // loops for the local matrix + // and the local vector as far + // as possible to make + // things a bit faster. + // + // Assembling the right hand side + // presents the only significant + // difference to how we did things in + // step-3: Instead of using a constant + // right hand side with value 1, we use + // the object representing the right + // hand side and evaluate it at the + // quadrature points: + for (unsigned int q_point=0; q_pointcell_matrix(i,j), we + // have to multiply the gradients of + // shape functions $i$ and $j$ at point + // q_point and multiply it with the + // scalar weights JxW. This is what + // actually happens: + // fe_values.shape_grad(i,q_point) + // returns a dim + // dimensional vector, represented by a + // Tensor@<1,dim@> object, + // and the operator* that multiplies it + // with the result of + // fe_values.shape_grad(j,q_point) + // makes sure that the dim + // components of the two vectors are + // properly contracted, and the result + // is a scalar floating point number + // that then is multiplied with the + // weights. Internally, this operator* + // makes sure that this happens + // correctly for all dim + // components of the vectors, whether + // dim be 2, 3, or any + // other space dimension; from a user's + // perspective, this is not something + // worth bothering with, however, + // making things a lot simpler if one + // wants to write code dimension + // independently. + + // With the local systems assembled, + // the transfer into the global matrix + // and right hand side is done exactly + // as before, but here we have again + // merged some loops for efficiency: + cell->get_dof_indices (local_dof_indices); +// for (unsigned int i=0; iBoundaryValues +// // class declared above): +// +// MatrixTools::apply_boundary_values (boundary_values, +// system_matrix, +// solution, +// system_rhs); +} + + // @sect4{Step4::projection_active_set} + + // Projection and updating of the active set + // for the dofs which penetrates the obstacle. 
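+ // Written out as a commented sketch (this only summarizes the loop
+ // below, it is not additional code): for every vertex degree of
+ // freedom $i$ with obstacle value $g_i$ we test
+ //   if (solution(i) >= g_i  &&  resid_vector(i) <= 0)
+ // and, where both conditions hold, add the inhomogeneous constraint
+ // $u_i = g_i$ to the ConstraintMatrix, set solution(i) to $g_i$, and
+ // flag the dof in the active_set vector.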
+template +void Step4::projection_active_set () +{ +// const Obstacle obstacle; +// std::vector vertex_touched (triangulation.n_vertices(), +// false); +// +// boundary_values.clear (); +// VectorTools::interpolate_boundary_values (dof_handler, +// 0, +// BoundaryValues(), +// boundary_values); +// +// typename DoFHandler::active_cell_iterator +// cell = dof_handler.begin_active(), +// endc = dof_handler.end(); +// +// active_set = 0; +// unsigned int n = 0; +// for (; cell!=endc; ++cell) +// for (unsigned int v=0; v::vertices_per_cell; ++v) +// { +// if (vertex_touched[cell->vertex_index(v)] == false) +// { +// vertex_touched[cell->vertex_index(v)] = true; +// unsigned int index_x = cell->vertex_dof_index (v,0); +// // unsigned int index_y = cell->vertex_dof_index (v,1); +// +// Point point (cell->vertex (v)[0], cell->vertex (v)[1]); +// double obstacle_value = obstacle.value (point); +// if (solution (index_x) >= obstacle_value && resid_vector (index_x) <= 0) +// { +// solution (index_x) = obstacle_value; +// boundary_values.insert (std::pair(index_x, obstacle_value)); +// active_set (index_x) = 1; +// n += 1; +// } +// } +// } +// std::cout<< "Number of active contraints: " << n < obstacle; + std::vector vertex_touched (triangulation.n_vertices(), + false); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + + constraints.clear(); + active_set = 0; + for (; cell!=endc; ++cell) + for (unsigned int v=0; v::vertices_per_cell; ++v) + { + unsigned int index_x = cell->vertex_dof_index (v,0); + + Point point (cell->vertex (v)[0], cell->vertex (v)[1]); + double obstacle_value = obstacle.value (point); + if (solution (index_x) >= obstacle_value && resid_vector (index_x) <= 0) + { + + constraints.add_line (index_x); + constraints.set_inhomogeneity (index_x, obstacle_value); + solution (index_x) = obstacle_value; + active_set (index_x) = 1; + } + } + + VectorTools::interpolate_boundary_values (dof_handler, + 0, + BoundaryValues(), + constraints); + constraints.close (); +} + + // @sect4{Step4::solve} + + // Solving the linear system of + // equations is something that looks + // almost identical in most + // programs. In particular, it is + // dimension independent, so this + // function is copied verbatim from the + // previous example. +template +void Step4::solve () +{ + ReductionControl reduction_control (100, 1e-12, 1e-2); + SolverCG<> solver (reduction_control); + PreconditionSSOR > precondition; + precondition.initialize (system_matrix, 1.5); + solver.solve (system_matrix, solution, system_rhs, precondition); + // PreconditionIdentity()); + + // We have made one addition, + // though: since we suppress output + // from the linear solvers, we have + // to print the number of + // iterations by hand. + std::cout << " " << reduction_control.last_step() + << " CG iterations needed to obtain convergence." + << std::endl; + + constraints.distribute (solution); +} + + // @sect4{Step4::output_results} + + // This function also does what the + // respective one did in step-3. No changes + // here for dimension independence either. + // + // The only difference to the previous + // example is that we want to write output in + // VTK format, rather than for gnuplot. VTK + // format is currently the most widely used + // one and is supported by a number of + // visualization programs such as Visit and + // Paraview (for ways to obtain these + // programs see the ReadMe file of + // deal.II). 
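+ // (As a practical aside, and only as a sketch of the calls that
+ // appear below: the function will in fact write every vector twice,
+ //   data_out.write_vtk (output_vtk);         // for VisIt/ParaView
+ //   data_out.write_gnuplot (output_gnuplot); // for gnuplot
+ // using one std::ofstream per output format.)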
+ // To write data in this format, we
+ // simply replace the
+ // data_out.write_gnuplot call
+ // by data_out.write_vtk.
+ //
+ // Since the program will run both 2d and 3d
+ // versions of the Laplace solver, we use the
+ // dimension in the filename to generate
+ // distinct filenames for each run (in a
+ // better program, one would check whether
+ // dim can have other values
+ // than 2 or 3, but we neglect this here for
+ // the sake of brevity).
+template <int dim>
+void Step4<dim>::output_results (Vector<double> vector_to_plot, const std::string& title) const
+{
+  DataOut<dim> data_out;
+
+  data_out.attach_dof_handler (dof_handler);
+  data_out.add_data_vector (vector_to_plot, "vector_to_plot");
+
+  data_out.build_patches ();
+
+  std::ofstream output_vtk (dim == 2 ?
+                            (title + ".vtk").c_str () :
+                            (title + ".vtk").c_str ());
+  data_out.write_vtk (output_vtk);
+
+  std::ofstream output_gnuplot (dim == 2 ?
+                                (title + ".gp").c_str () :
+                                (title + ".gp").c_str ());
+  data_out.write_gnuplot (output_gnuplot);
+}
+
+
+
+ // @sect4{Step4::run}
+
+ // This is the function which has the
+ // top-level control over
+ // everything. Apart from one line of
+ // additional output, it is the same
+ // as for the previous example.
+template <int dim>
+void Step4<dim>::run ()
+{
+  std::cout << "Solving problem in " << dim << " space dimensions." << std::endl;
+
+  make_grid();
+  setup_system ();
+
+  std::cout<< "Update Active Set in Dim = " << dim <