From 9550399b804a5176572dbeff82e4bb018f5302a1 Mon Sep 17 00:00:00 2001
From: "(no author)" <(no author)@0785d39b-7218-0410-832d-ea1e28bc413d>
Date: Sat, 18 Dec 2004 18:04:32 +0000
Subject: [PATCH] This commit was manufactured by cvs2svn to create branch 'Branch-5-1'.

git-svn-id: https://svn.dealii.org/branches/Branch-5-1@9824 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/base/Attic/doc/Makefile | 27 -
 deal.II/base/Attic/doc/cvslog/.cvsignore | 1 -
 deal.II/base/Attic/doc/doc.kdoc/.cvsignore | 2 -
 .../base/Attic/doc/doc.kdoc/base/.cvsignore | 1 -
 deal.II/deal.II/Attic/examples/Makefile | 31 -
 deal.II/deal.II/Attic/examples/README | 11 -
 .../Attic/examples/convergence/.cvsignore | 4 -
 .../Attic/examples/convergence/Makefile | 169 ---
 .../Attic/examples/convergence/convergence.cc | 548 --------
 .../Attic/examples/convergence/make_ps | 52 -
 deal.II/deal.II/Attic/examples/dof/.cvsignore | 5 -
 deal.II/deal.II/Attic/examples/dof/Makefile | 172 ---
 .../deal.II/Attic/examples/dof/dof_test.cc | 451 ------
 .../deal.II/Attic/examples/dof/dof_test.prm | 4 -
 deal.II/deal.II/Attic/examples/dof/make_ps | 122 --
 .../Attic/examples/dof/results/.cvsignore | 1 -
 .../examples/error-estimation/.cvsignore | 4 -
 .../Attic/examples/error-estimation/Makefile | 172 ---
 .../examples/error-estimation/ee.gauss.prm | 8 -
 .../examples/error-estimation/ee.kink.prm | 8 -
 .../examples/error-estimation/ee.singular.prm | 8 -
 .../error-estimation/error-estimation.cc | 755 ----------
 .../Attic/examples/error-estimation/make_ps | 94 --
 .../examples/error-estimation/strip_comments | 1 -
 .../deal.II/Attic/examples/grid/.cvsignore | 4 -
 deal.II/deal.II/Attic/examples/grid/Makefile | 178 ---
 .../deal.II/Attic/examples/grid/grid_test.cc | 329 -----
 deal.II/deal.II/Attic/examples/grid/make_ps | 43 -
 .../Attic/examples/grid/results/.cvsignore | 1 -
 .../Attic/examples/multigrid/.cvsignore | 4 -
 .../deal.II/Attic/examples/multigrid/Makefile | 169 ---
 .../deal.II/Attic/examples/multigrid/make_ps | 52 -
 .../Attic/examples/multigrid/multigrid.cc | 515 -------
 .../nonlinear/fixed-point-iteration/Makefile | 133 --
 .../fixed-point-iteration/nonlinear.cc | 253 ----
 .../deal.II/Attic/examples/poisson/.cvsignore | 6 -
 .../deal.II/Attic/examples/poisson/Makefile | 134 --
 .../Attic/examples/poisson/equation.cc | 86 --
 .../deal.II/Attic/examples/poisson/poisson.cc | 29 -
 .../deal.II/Attic/examples/poisson/poisson.h | 101 --
 .../Attic/examples/poisson/poisson.prm | 5 -
 .../deal.II/Attic/examples/poisson/problem.cc | 622 ---------
 .../Attic/examples/poisson/results/.cvsignore | 2 -
 .../Attic/examples/poisson/results/make_ps | 38 -
 .../Attic/examples/step-by-step/Makefile | 36 -
 .../examples/step-by-step/step-1/.cvsignore | 2 -
 .../examples/step-by-step/step-1/Makefile | 167 ---
 .../examples/step-by-step/step-1/step-1.cc | 228 ---
 .../examples/step-by-step/step-2/.cvsignore | 2 -
 .../examples/step-by-step/step-2/Makefile | 167 ---
 .../examples/step-by-step/step-2/step-2.cc | 361 -----
 .../examples/step-by-step/step-3/.cvsignore | 2 -
 .../examples/step-by-step/step-3/Makefile | 167 ---
 .../examples/step-by-step/step-3/step-3.cc | 829 -----------
 .../examples/step-by-step/step-4/.cvsignore | 2 -
 .../examples/step-by-step/step-4/Makefile | 169 ---
 .../examples/step-by-step/step-4/step-4.cc | 607 --------
 .../examples/step-by-step/step-5/.cvsignore | 2 -
 .../examples/step-by-step/step-5/Makefile | 167 ---
 .../step-by-step/step-5/circle-grid.inp | 46 -
 .../examples/step-by-step/step-5/step-5.cc | 940 -------------
 .../examples/step-by-step/step-6/.cvsignore | 2 -
 .../examples/step-by-step/step-6/Makefile | 167 ---
 .../examples/step-by-step/step-6/step-6.cc | 1035 --------------
 .../examples/step-by-step/step-7/.cvsignore | 2 -
 .../examples/step-by-step/step-7/Makefile | 167 ---
 .../examples/step-by-step/step-7/step-7.cc | 1242 -----------------
 .../examples/step-by-step/step-8/.cvsignore | 2 -
 .../examples/step-by-step/step-8/Makefile | 167 ---
 .../examples/step-by-step/step-8/step-8.cc | 1077 --------------
 deal.II/doc/news/c-5.0.html | 540 +++++++
 deal.II/lac/Attic/doc/Makefile | 27 -
 deal.II/lac/Attic/doc/cvslog/.cvsignore | 1 -
 deal.II/lac/Attic/doc/doc.kdoc/.cvsignore | 2 -
 deal.II/lac/include/lac/solver_gmres.h | 402 +++---
 75 files changed, 725 insertions(+), 13357 deletions(-)
 delete mode 100644 deal.II/base/Attic/doc/Makefile
 delete mode 100644 deal.II/base/Attic/doc/cvslog/.cvsignore
 delete mode 100644 deal.II/base/Attic/doc/doc.kdoc/.cvsignore
 delete mode 100644 deal.II/base/Attic/doc/doc.kdoc/base/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/README
 delete mode 100644 deal.II/deal.II/Attic/examples/convergence/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/convergence/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/convergence/convergence.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/convergence/make_ps
 delete mode 100644 deal.II/deal.II/Attic/examples/dof/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/dof/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/dof/dof_test.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/dof/dof_test.prm
 delete mode 100644 deal.II/deal.II/Attic/examples/dof/make_ps
 delete mode 100644 deal.II/deal.II/Attic/examples/dof/results/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/ee.gauss.prm
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/ee.kink.prm
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/ee.singular.prm
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/error-estimation.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/error-estimation/make_ps
 delete mode 100755 deal.II/deal.II/Attic/examples/error-estimation/strip_comments
 delete mode 100644 deal.II/deal.II/Attic/examples/grid/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/grid/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/grid/grid_test.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/grid/make_ps
 delete mode 100644 deal.II/deal.II/Attic/examples/grid/results/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/multigrid/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/multigrid/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/multigrid/make_ps
 delete mode 100644 deal.II/deal.II/Attic/examples/multigrid/multigrid.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/nonlinear.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/poisson/.cvsignore
 delete mode 100644 deal.II/deal.II/Attic/examples/poisson/Makefile
 delete mode 100644 deal.II/deal.II/Attic/examples/poisson/equation.cc
 delete mode 100644 deal.II/deal.II/Attic/examples/poisson/poisson.cc
 delete
mode 100644 deal.II/deal.II/Attic/examples/poisson/poisson.h delete mode 100644 deal.II/deal.II/Attic/examples/poisson/poisson.prm delete mode 100644 deal.II/deal.II/Attic/examples/poisson/problem.cc delete mode 100644 deal.II/deal.II/Attic/examples/poisson/results/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/poisson/results/make_ps delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-1/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-1/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-1/step-1.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-2/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-2/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-2/step-2.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-3/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-3/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-3/step-3.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-4/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-4/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-4/step-4.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-5/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-5/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-5/circle-grid.inp delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-5/step-5.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-6/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-6/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-6/step-6.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-7/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-7/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-7/step-7.cc delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-8/.cvsignore delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-8/Makefile delete mode 100644 deal.II/deal.II/Attic/examples/step-by-step/step-8/step-8.cc create mode 100644 deal.II/doc/news/c-5.0.html delete mode 100644 deal.II/lac/Attic/doc/Makefile delete mode 100644 deal.II/lac/Attic/doc/cvslog/.cvsignore delete mode 100644 deal.II/lac/Attic/doc/doc.kdoc/.cvsignore diff --git a/deal.II/base/Attic/doc/Makefile b/deal.II/base/Attic/doc/Makefile deleted file mode 100644 index 36fbd4e8a7..0000000000 --- a/deal.II/base/Attic/doc/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -# $Id$ - - -KDOCFLAGS = -I../../../deal.II/doc/kdoc ../../../deal.II/doc/kdoc/kdoc -a -p -kdoc.inc = $(wildcard ../include/base/*.h) - - - -doc-html: kdoc - - -# make kdoc doc; make sure that the *.kdoc files exist by -# using the dependancies and the following rules. -kdoc: $(kdoc.inc) - cd doc.kdoc ; perl $(KDOCFLAGS) -ubase -dbase \ - base $(kdoc.inc:..%=../..%) - -cvslog: - @cd .. 
; ../deal.II/doc/cvslog/cvs2html -o doc/cvslog/base #-a -k - -clean: - rm -f doc.kdoc/base/* \ - cvslog/* \ - *~ - - -.PHONY: doc-html kdoc cvslog clean diff --git a/deal.II/base/Attic/doc/cvslog/.cvsignore b/deal.II/base/Attic/doc/cvslog/.cvsignore deleted file mode 100644 index 2d19fc766d..0000000000 --- a/deal.II/base/Attic/doc/cvslog/.cvsignore +++ /dev/null @@ -1 +0,0 @@ -*.html diff --git a/deal.II/base/Attic/doc/doc.kdoc/.cvsignore b/deal.II/base/Attic/doc/doc.kdoc/.cvsignore deleted file mode 100644 index fc2f83228f..0000000000 --- a/deal.II/base/Attic/doc/doc.kdoc/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.kdoc -base diff --git a/deal.II/base/Attic/doc/doc.kdoc/base/.cvsignore b/deal.II/base/Attic/doc/doc.kdoc/base/.cvsignore deleted file mode 100644 index 2d19fc766d..0000000000 --- a/deal.II/base/Attic/doc/doc.kdoc/base/.cvsignore +++ /dev/null @@ -1 +0,0 @@ -*.html diff --git a/deal.II/deal.II/Attic/examples/Makefile b/deal.II/deal.II/Attic/examples/Makefile deleted file mode 100644 index 2be5604543..0000000000 --- a/deal.II/deal.II/Attic/examples/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998 - - -# list the directories we want to visit -subdirs = grid/ dof/ poisson/ convergence/ error-estimation/ multigrid/ step-by-step/ - -# define lists of targets: for each directory we produce a target name -# for compilation, running and cleaning by appending the action to -# the directory name (replacing the slash by ".action") -compile = $(subdirs:/=.compile) -run = $(subdirs:/=.run) -clean = $(subdirs:/=.clean) - -# define global targets which are to be excuted in every subdirectory -compile: $(compile) -run : $(run) -# for cleaning up: do this also for the present directory -clean : $(clean) - -rm -f *~ - - -# define the action of the targets for the specific subdirectories -$(compile) : - cd $(@:.compile=) ; $(MAKE) - -$(run) : - cd $(@:.run=) ; $(MAKE) run - -$(clean) : - -cd $(@:.clean=) ; $(MAKE) clean diff --git a/deal.II/deal.II/Attic/examples/README b/deal.II/deal.II/Attic/examples/README deleted file mode 100644 index a4c518425b..0000000000 --- a/deal.II/deal.II/Attic/examples/README +++ /dev/null @@ -1,11 +0,0 @@ -The example applications in the subdirectories (apart from the -'step-by-step' directory) were written in the early stages of the -library and served more the task of verification than as proper -examples. For this reason, they are not very well documented and are -probably no good examples anyway. - -One, the multigrid example, does not even what its name may suggest. - -We excuse for the fact that they might not serve as good -examples. Better ones are planned and in parts written, but not yet -available at present. Sorry. diff --git a/deal.II/deal.II/Attic/examples/convergence/.cvsignore b/deal.II/deal.II/Attic/examples/convergence/.cvsignore deleted file mode 100644 index 61fca49931..0000000000 --- a/deal.II/deal.II/Attic/examples/convergence/.cvsignore +++ /dev/null @@ -1,4 +0,0 @@ -convergence -Makefile.dep -*.go -*.o diff --git a/deal.II/deal.II/Attic/examples/convergence/Makefile b/deal.II/deal.II/Attic/examples/convergence/Makefile deleted file mode 100644 index 3b00259d3f..0000000000 --- a/deal.II/deal.II/Attic/examples/convergence/Makefile +++ /dev/null @@ -1,169 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998, 1999, 2000 - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. 
It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = convergence - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../.. - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. 
-$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - gnuplot make_ps - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/convergence/convergence.cc b/deal.II/deal.II/Attic/examples/convergence/convergence.cc deleted file mode 100644 index 38bffd824e..0000000000 --- a/deal.II/deal.II/Attic/examples/convergence/convergence.cc +++ /dev/null @@ -1,548 +0,0 @@ -/* $Id$ */ -/* Copyright W. Bangerth, University of Heidelberg, 1998 */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - - - - - -template -class PoissonEquation : public Equation { - public: - PoissonEquation (const Function &rhs) : - Equation(1), - right_hand_side (rhs) {}; - - virtual void assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (FullMatrix &cell_matrix, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - protected: - const Function &right_hand_side; -}; - - - - - - -template -class PoissonProblem : public ProblemBase { - public: - PoissonProblem (unsigned int order); - ~PoissonProblem (); - - void clear (); - void create_new (); - int run (unsigned int level); - void print_history (string filename) const; - - protected: - Triangulation *tria; - DoFHandler *dof; - - Function *rhs; - Function *boundary_values; - - vector l1_error, l2_error, linfty_error, h1_seminorm_error, h1_error; - vector n_dofs; - - unsigned int order; -}; - - - - - -/** - Right hand side constructed such that the exact solution is - $sin(2 pi x) + sin(2 pi y)$ - */ -template -class RHSPoly : public Function { - public: - /** - * Return the value of the function - * at the given point. 
- */ - virtual double value (const Point &p, - const unsigned int component) const; -}; - - - -template -class Solution : public Function { - public: - /** - * Return the value of the function - * at the given point. - */ - virtual double value (const Point &p, - const unsigned int component) const; - /** - * Return the gradient of the function - * at the given point. - */ - virtual Tensor<1,dim> gradient (const Point &p, - const unsigned int component) const; -}; - - - - -template <> -double RHSPoly<2>::value (const Point<2> &p, - const unsigned int) const { - const double x = p(0), - y = p(1); - const double pi= 3.1415926536; - return 4*pi*pi*(sin(2*pi*x)+sin(2*pi*y)); -}; - - - -template <> -double Solution<2>::value (const Point<2> &p, - const unsigned int) const { - const double x = p(0), - y = p(1); - const double pi= 3.1415926536; - return sin(2*pi*x)+sin(2*pi*y); -}; - - -template <> -Tensor<1,2> Solution<2>::gradient (const Point<2> &p, - const unsigned int) const { - const double x = p(0), - y = p(1); - const double pi= 3.1415926536; - return Point<2> (2*pi*cos(2*pi*x), - 2*pi*cos(2*pi*y)); -}; - - - - - -template <> -void PoissonEquation<2>::assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues<2> &fe_values, - const DoFHandler<2>::cell_iterator &) const { - for (unsigned int point=0; point -void PoissonEquation::assemble (FullMatrix &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - -template -void PoissonEquation::assemble (Vector &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - - - - - - - -template -PoissonProblem::PoissonProblem (unsigned int order) : - tria(0), dof(0), rhs(0), - boundary_values(0), order(order) {}; - - -template -PoissonProblem::~PoissonProblem () -{ - clear (); -}; - - - -template -void PoissonProblem::clear () { - if (dof != 0) { - delete dof; - dof = 0; - }; - - if (tria != 0) { - delete tria; - tria = 0; - }; - - - // make it known to the underlying - // ProblemBase that tria and dof - // are already deleted - set_tria_and_dof (tria, dof); - - if (rhs != 0) - { - delete rhs; - rhs = 0; - }; - - if (boundary_values != 0) - { - delete boundary_values; - boundary_values = 0; - }; - - ProblemBase::clear (); -}; - - - - -template -void PoissonProblem::create_new () { - clear (); - - tria = new Triangulation(); - dof = new DoFHandler (*tria); - set_tria_and_dof (tria, dof); -}; - - - - -template -int PoissonProblem::run (const unsigned int level) { - create_new (); - - cout << "Refinement level = " << level - << ", using elements of type <"; - switch (order) - { - case 0: - cout << "criss-cross"; - break; - default: - cout << "Lagrange-" << order; - break; - }; - cout << ">" << endl; - - cout << " Making grid... "; - GridGenerator::hyper_ball (*tria); - HyperBallBoundary boundary_description; - tria->set_boundary (0, boundary_description); - tria->begin_active()->set_refine_flag(); - (++(++(tria->begin_active())))->set_refine_flag(); - tria->execute_coarsening_and_refinement (); - tria->refine_global (level); - cout << tria->n_active_cells() << " active cells." 
<< endl; - - rhs = new RHSPoly(); - boundary_values = new Solution (); - - - FiniteElement *fe; - PoissonEquation equation (*rhs); - Quadrature *quadrature; - Quadrature *boundary_quadrature; - switch (order) { - case 0: - fe = new FECrissCross(); - quadrature = new QCrissCross1(); - boundary_quadrature = new QGauss2(); - break; - case 1: - fe = new FEQ1(); - quadrature = new QGauss3(); - boundary_quadrature = new QGauss2(); - break; - case 2: - fe = new FEQ2(); - quadrature = new QGauss4(); - boundary_quadrature = new QGauss3(); - break; - case 3: - fe = new FEQ3(); - quadrature = new QGauss5(); - boundary_quadrature = new QGauss4(); - break; - case 4: - fe = new FEQ4(); - quadrature = new QGauss6(); - boundary_quadrature = new QGauss5(); - break; - default: - return 100000; - }; - - cout << " Distributing dofs... "; - dof->distribute_dofs (*fe); - cout << dof->n_dofs() << " degrees of freedom." << endl; - n_dofs.push_back (dof->n_dofs()); - - cout << " Assembling matrices..." << endl; - UpdateFlags update_flags = UpdateFlags(update_values | update_q_points | - update_gradients | update_JxW_values); - - ProblemBase::FunctionMap dirichlet_bc; - dirichlet_bc[0] = boundary_values; - assemble (equation, *quadrature, update_flags, dirichlet_bc); - - cout << " Solving..." << endl; - solve (); - - Solution sol; - Vector l1_error_per_cell, l2_error_per_cell, linfty_error_per_cell; - Vector h1_seminorm_error_per_cell, h1_error_per_cell; - - cout << " Calculating L1 error... "; - VectorTools::integrate_difference (*dof_handler, - solution, sol, - l1_error_per_cell, - *quadrature, L1_norm); - cout << l1_error_per_cell.l1_norm() << endl; - l1_error.push_back (l1_error_per_cell.l1_norm()); - - cout << " Calculating L2 error... "; - VectorTools::integrate_difference (*dof_handler, - solution, sol, - l2_error_per_cell, - *quadrature, L2_norm); - cout << l2_error_per_cell.l2_norm() << endl; - l2_error.push_back (l2_error_per_cell.l2_norm()); - - cout << " Calculating L-infinity error... "; - VectorTools::integrate_difference (*dof_handler, - solution, sol, - linfty_error_per_cell, - *quadrature, Linfty_norm); - cout << linfty_error_per_cell.linfty_norm() << endl; - linfty_error.push_back (linfty_error_per_cell.linfty_norm()); - - cout << " Calculating H1-seminorm error... "; - VectorTools::integrate_difference (*dof_handler, - solution, sol, - h1_seminorm_error_per_cell, - *quadrature, H1_seminorm); - cout << h1_seminorm_error_per_cell.l2_norm() << endl; - h1_seminorm_error.push_back (h1_seminorm_error_per_cell.l2_norm()); - - cout << " Calculating H1 error... 
"; - VectorTools::integrate_difference (*dof_handler, - solution, sol, - h1_error_per_cell, - *quadrature, H1_norm); - cout << h1_error_per_cell.l2_norm() << endl; - h1_error.push_back (h1_error_per_cell.l2_norm()); - - if (dof->n_dofs()<=5000) - { - Vector l1_error_per_dof(dof->n_dofs()); - Vector l2_error_per_dof(dof->n_dofs()); - Vector linfty_error_per_dof(dof->n_dofs()); - Vector h1_seminorm_error_per_dof(dof->n_dofs()); - Vector h1_error_per_dof(dof->n_dofs()); - DoFTools::distribute_cell_to_dof_vector (*dof, l1_error_per_cell, l1_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, l2_error_per_cell, l2_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, linfty_error_per_cell, - linfty_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, h1_seminorm_error_per_cell, - h1_seminorm_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, h1_error_per_cell, h1_error_per_dof); - -// Vector projected_solution; -// ConstraintMatrix constraints; -// constraints.close (); -// VectorTools::project (*dof, constraints, *fe, -// StraightBoundary(), *quadrature, -// sol, projected_solution, false, -// *boundary_quadrature); -// cout << " Calculating L2 error of projected solution... "; -// VectorTools::integrate_difference (*dof_handler, -// projected_solution, sol, -// l2_error_per_cell, -// *quadrature, *fe, L2_norm); -// cout << l2_error_per_cell.l2_norm() << endl; - - - string filename; - filename = ('0'+order); - filename += "."; - filename += ('0'+level); - filename += ".ucd"; - cout << " Writing error plots to <" << filename << ">..." << endl; - - DataOut out; - ofstream o(filename.c_str()); - fill_data (out); - out.add_data_vector (l1_error_per_dof, "L1_Error"); - out.add_data_vector (l2_error_per_dof, "L2_Error"); - out.add_data_vector (linfty_error_per_dof, "Linfty_Error"); - out.add_data_vector (h1_seminorm_error_per_dof, "H1_seminorm_Error"); - out.add_data_vector (h1_error_per_dof, "H1_Error"); - out.build_patches (); - out.write_ucd (o); - o.close (); - } - else - cout << " Not writing error as grid." 
<< endl; - - cout << endl; - - const unsigned int n_dofs = dof->n_dofs(); - // release the lock that the dof object - // has to the finite element object - dof->clear (); - tria->set_boundary (0); - - delete fe; - delete quadrature; - delete boundary_quadrature; - - return n_dofs; -}; - - -template -void PoissonProblem::print_history (string filename) const { - ofstream out(filename.c_str()); - out << "# n_dofs l1_error l2_error linfty_error h1_seminorm_error h1_error" - << endl; - for (unsigned int i=0; ih/2:" << endl; - cout << " L1 error : " << 1./average_l1 << endl - << " L2 error : " << 1./average_l2 << endl - << " Linfty error : " << 1./average_linfty << endl - << " H1 seminorm error: " << 1./average_h1_semi << endl - << " H1 error : " << 1./average_h1 << endl; - cout << "==========================================================\n"; - cout << "==========================================================\n"; -}; - - - - -int main () { - deallog.depth_console (0); - for (unsigned int order=0; order<5; ++order) - { - PoissonProblem<2> problem (order); - - unsigned int level=0; - unsigned int n_dofs; - do - n_dofs = problem.run (level++); - while (n_dofs<25000); - - string filename; - switch (order) - { - case 0: - filename = "criss_cross"; - break; - case 1: - filename = "linear"; - break; - case 2: - filename = "quadratic"; - break; - case 3: - filename = "cubic"; - break; - case 4: - filename = "quartic"; - break; - }; - filename += ".history"; - - cout << endl << "Printing convergence history to <" - << filename << ">..." << endl; - problem.print_history (filename); - cout << endl << endl << endl; - }; - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/convergence/make_ps b/deal.II/deal.II/Attic/examples/convergence/make_ps deleted file mode 100644 index 76c13a8624..0000000000 --- a/deal.II/deal.II/Attic/examples/convergence/make_ps +++ /dev/null @@ -1,52 +0,0 @@ -set term postscript eps -set xlabel "Number of degrees of freedom" -set data style linespoints -set logscale xy - - - -set ylabel "Error" - -set output "criss-cross.eps" - -plot "criss_cross.history" using 1:2 title "L1 error","criss_cross.history" using 1:3 title "L2 error","criss_cross.history" using 1:4 title "Linfty error","criss_cross.history" using 1:5 title "H1 seminorm error","criss_cross.history" using 1:6 title "H1 error" - - - -set output "linear.eps" - -plot "linear.history" using 1:2 title "L1 error","linear.history" using 1:3 title "L2 error","linear.history" using 1:4 title "Linfty error","linear.history" using 1:5 title "H1 seminorm error","linear.history" using 1:6 title "H1 error" - - - -set output "quadratic.eps" - -plot "quadratic.history" using 1:2 title "L1 error","quadratic.history" using 1:3 title "L2 error","quadratic.history" using 1:4 title "Linfty error","quadratic.history" using 1:5 title "H1 seminorm error","quadratic.history" using 1:6 title "H1 error" - - - -set output "cubic.eps" - -plot "cubic.history" using 1:2 title "L1 error","cubic.history" using 1:3 title "L2 error","cubic.history" using 1:4 title "Linfty error","cubic.history" using 1:5 title "H1 seminorm error","cubic.history" using 1:6 title "H1 error" - - - -set output "quartic.eps" - -plot "quartic.history" using 1:2 title "L1 error","quartic.history" using 1:3 title "L2 error","quartic.history" using 1:4 title "Linfty error","quartic.history" using 1:5 title "H1 seminorm error","quartic.history" using 1:6 title "H1 error" - - - -set output "l2error.eps" -set ylabel "L2-error" - -plot "criss_cross.history" using 1:3 
title "Criss-cross elements", "linear.history" using 1:3 title "Linear elements", "quadratic.history" using 1:3 title "Quadratic elements", "cubic.history" using 1:3 title "Cubic elements", "quartic.history" using 1:3 title "Quartic elements" - - - -set output "h1error.eps" -set ylabel "H1-error" - -plot "criss_cross.history" using 1:6 title "Criss-cross elements", "linear.history" using 1:6 title "Linear elements", "quadratic.history" using 1:6 title "Quadratic elements", "cubic.history" using 1:6 title "Cubic elements", "quartic.history" using 1:6 title "Quartic elements" - - diff --git a/deal.II/deal.II/Attic/examples/dof/.cvsignore b/deal.II/deal.II/Attic/examples/dof/.cvsignore deleted file mode 100644 index 6105d89a5b..0000000000 --- a/deal.II/deal.II/Attic/examples/dof/.cvsignore +++ /dev/null @@ -1,5 +0,0 @@ -dof_test -Makefile.dep -Makefile.dep -*.go -*.o diff --git a/deal.II/deal.II/Attic/examples/dof/Makefile b/deal.II/deal.II/Attic/examples/dof/Makefile deleted file mode 100644 index ee93d64e40..0000000000 --- a/deal.II/deal.II/Attic/examples/dof/Makefile +++ /dev/null @@ -1,172 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998, 1999, 2000 - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = dof_test - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../.. - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. 
-# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-deal2-3d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-deal2-3d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - ./$(target) 2 $(target).prm - ./$(target) 3 $(target).prm - gnuplot make_ps - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/dof/dof_test.cc b/deal.II/deal.II/Attic/examples/dof/dof_test.cc deleted file mode 100644 index a9513d4b60..0000000000 --- a/deal.II/deal.II/Attic/examples/dof/dof_test.cc +++ /dev/null @@ -1,451 +0,0 @@ -/* $Id$ */ -/* Copyright W. 
Bangerth, University of Heidelberg, 1998 */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - - -// 1: continuous refinement of the unit square always in the middle -// 2: refinement of the circle at the boundary -// 2: refinement of a wiggled area at the boundary -// 4: random refinement - - - - - - -template -class Ball : - public StraightBoundary { - public: - virtual Point - get_new_point_on_line (const typename Triangulation::line_iterator &line) const { - Point middle = StraightBoundary::get_new_point_on_line(line); - - for (int i=0; i - get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const { - Point middle = StraightBoundary::get_new_point_on_quad(quad); - - for (int i=0; i -class CurvedLine : - public StraightBoundary { - public: - virtual Point - get_new_point_on_line (const typename Triangulation::line_iterator &line) const; - - virtual Point - get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const; -}; - - - -template -Point -CurvedLine::get_new_point_on_line (const typename Triangulation::line_iterator &line) const -{ - Point middle = StraightBoundary::get_new_point_on_line (line); - - // if the line is at the top of bottom - // face: do a special treatment on - // this line. Note that if the - // z-value of the midpoint is either - // 0 or 1, then the z-values of all - // vertices of the line is like that - if (dim>=3) - if (((middle(2) == 0) || (middle(2) == 1)) - // find out, if the line is in the - // interior of the top or bottom face - // of the domain, or at the edge. - // lines at the edge need to undergo - // the usual treatment, while for - // interior lines taking the midpoint - // is sufficient - // - // note: the trick with the boundary - // id was invented after the above was - // written, so we are not very strict - // here with using these flags - && (line->boundary_indicator() == 1)) - return middle; - - - double x=middle(0), - y=middle(1); - - if (y -Point -CurvedLine::get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const -{ - Point middle = StraightBoundary::get_new_point_on_quad (quad); - - // if the face is at the top of bottom - // face: do not move the midpoint in - // x/y direction. 
Note that if the - // z-value of the midpoint is either - // 0 or 1, then the z-values of all - // vertices of the quad is like that - if ((middle(2) == 0) || (middle(2) == 1)) - return middle; - - double x=middle(0), - y=middle(1); - - if (y -class TestCases : public MultipleParameterLoop::UserClass{ - public: - TestCases (); - virtual ~TestCases (); - - virtual void create_new (const unsigned int run_no); - virtual void declare_parameters (ParameterHandler &prm); - virtual void run (ParameterHandler &prm); - - private: - Triangulation *tria; - DoFHandler *dof; -}; - - - -template -TestCases::TestCases () : - tria(0), dof(0) {}; - - -template -TestCases::~TestCases () -{ - if (dof) delete dof; - if (tria) delete tria; -}; - - - -template -void TestCases::create_new (const unsigned int) { - if (dof != 0) delete dof; - if (tria != 0) delete tria; - - tria = new Triangulation(); - GridGenerator::hyper_cube(*tria); - - dof = new DoFHandler (*tria); -}; - - - -template -void TestCases::declare_parameters (ParameterHandler &prm) { - if (dim>=2) - prm.declare_entry ("Test run", "zoom in", - Patterns::Selection("zoom in|ball|curved line|random")); - else - prm.declare_entry ("Test run", "zoom in", - Patterns::Selection("zoom in|random")); - prm.declare_entry ("Grid file", "grid.1"); - prm.declare_entry ("Sparsity file", "sparsity.1"); - prm.declare_entry ("Condensed sparsity file", "sparsity.c.1"); -}; - - - -template -void TestCases::run (ParameterHandler &prm) { - cout << "Dimension = " << dim - << ", Test case = " << prm.get ("Test run") << endl - << endl; - - string test = prm.get ("Test run"); - unsigned int test_case = 1; - if (test=="zoom in") test_case = 1; - else - if (test=="ball") test_case = 2; - else - if (test=="curved line") test_case = 3; - else - if (test=="random") test_case = 4; - else - cerr << "This test seems not to be implemented!" << endl; - - - cout << " Making grid..." << endl; - Boundary *boundary = 0; - - switch (test_case) - { - case 1: - { - // refine first cell - tria->begin_active()->set_refine_flag(); - tria->execute_coarsening_and_refinement (); - // refine first active cell - // on coarsest level - tria->begin_active()->set_refine_flag (); - tria->execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell; - for (int i=0; i<17; ++i) - { - // refine the presently - // second last cell 17 - // times - cell = tria->last_active(tria->n_levels()-1); - --cell; - cell->set_refine_flag (); - tria->execute_coarsening_and_refinement (); - }; - - break; - } - - case 2: - case 3: - { - if (dim==3) - { - tria->begin_active()->face(2)->set_boundary_indicator(1); - tria->begin_active()->face(4)->set_boundary_indicator(1); - }; - - // set the boundary function - boundary = (test_case==2 ? - static_cast*>(new Ball()) : - static_cast*>(new CurvedLine())); - tria->set_boundary (0, *boundary); - tria->set_boundary (1, *boundary); - - // refine once - tria->begin_active()->set_refine_flag(); - tria->execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell, endc; - for (int i=0; i<6-dim; ++i) - { - cell = tria->begin_active(); - endc = tria->end(); - - // refine all - // boundary cells - for (; cell!=endc; ++cell) - if (cell->at_boundary()) - cell->set_refine_flag(); - - tria->execute_coarsening_and_refinement(); - }; - - break; - } - - case 4: - { - // refine once - tria->begin_active()->set_refine_flag(); - tria->execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell, endc; - for (int i=0; i<(dim==2 ? 
12 : (dim==3 ? 7 : 20)); ++i) - { - int n_levels = tria->n_levels(); - cell = tria->begin_active(); - endc = tria->end(); - - for (; cell!=endc; ++cell) - { - double r = rand()*1.0/RAND_MAX, - weight = 1.* - (cell->level()*cell->level()) / - (n_levels*n_levels); - - if (r <= 0.5*weight) - cell->set_refine_flag (); - }; - - tria->execute_coarsening_and_refinement (); - }; - break; - } - }; - - // output the grid - string file_prefix ("results/"); - file_prefix += ('0'+dim); - file_prefix += "d."; - - cout << " Writing grid..." << endl; - ofstream out((file_prefix + prm.get("Grid file")).c_str()); - GridOut().write_gnuplot (*tria, out); - - - - - cout << " Distributing degrees of freedom..." << endl; - FEQ1 fe; - dof->distribute_dofs (fe); - - cout << " Renumbering degrees of freedom..." << endl; - DoFRenumbering::Cuthill_McKee (*dof); - - SparsityPattern sparsity (dof->n_dofs(), - dof->max_couplings_between_dofs()); - - - DoFTools::make_sparsity_pattern (*dof, sparsity); - int unconstrained_bandwidth = sparsity.bandwidth(); - - cout << " Writing sparsity pattern..." << endl; - ofstream sparsity_out ((file_prefix + prm.get("Sparsity file")).c_str()); - sparsity.print_gnuplot (sparsity_out); - - - - // computing constraints - cout << " Computing constraints..." << endl; - ConstraintMatrix constraints; - DoFTools::make_hanging_node_constraints (*dof, constraints); - constraints.close (); - constraints.condense (sparsity); - - cout << " Writing condensed sparsity pattern..." << endl; - ofstream c_sparsity_out ((file_prefix + - prm.get("Condensed sparsity file")).c_str()); - sparsity.print_gnuplot (c_sparsity_out); - - - cout << endl - << " Total number of cells = " << tria->n_cells() << endl - << " Total number of active cells = " << tria->n_active_cells() << endl - << " Number of DoFs = " << dof->n_dofs() << endl - << " Number of constraints = " << constraints.n_constraints() << endl - << " Unconstrained matrix bandwidth= " << unconstrained_bandwidth << endl - << " Constrained matrix bandwidth = " << sparsity.bandwidth() - << endl << endl; - - // release the lock that dof has to the - // finite element object - dof->clear (); - tria->set_boundary (0); - tria->set_boundary (1); - if (boundary) - delete boundary; -}; - - - -int main (int argc, char **argv) { - if (argc!=3) - { - cerr << "Usage: dof_test dimension parameterfile" << endl << endl; - return 1; - }; - - unsigned int dim; - if (argv[1][0] == '2') - dim = 2; - else - dim = 3; - - switch (dim) - { - case 2: - { - TestCases<2> tests; - MultipleParameterLoop input_data; - - tests.declare_parameters(input_data); - input_data.read_input (argv[2]); - input_data.loop (tests); - - break; - }; - - case 3: - { - TestCases<3> tests; - MultipleParameterLoop input_data; - - tests.declare_parameters(input_data); - input_data.read_input (argv[2]); - input_data.loop (tests); - - break; - }; - }; - - return 0; -}; - diff --git a/deal.II/deal.II/Attic/examples/dof/dof_test.prm b/deal.II/deal.II/Attic/examples/dof/dof_test.prm deleted file mode 100644 index 84697993f4..0000000000 --- a/deal.II/deal.II/Attic/examples/dof/dof_test.prm +++ /dev/null @@ -1,4 +0,0 @@ -set Test run = { zoom in | ball | curved line | random } -set Grid file = {{ zoom_in | ball | curved_line | random }}.grid -set Sparsity file = {{ zoom_in | ball | curved_line | random }}.sparsity -set Condensed sparsity file = {{ zoom_in | ball | curved_line | random }}.sparsity.c \ No newline at end of file diff --git a/deal.II/deal.II/Attic/examples/dof/make_ps 
b/deal.II/deal.II/Attic/examples/dof/make_ps deleted file mode 100644 index 281a20bc8b..0000000000 --- a/deal.II/deal.II/Attic/examples/dof/make_ps +++ /dev/null @@ -1,122 +0,0 @@ -set size 0.721,1 -set data style lines -set noxtics -set noytics -set noztics -set noxzeroaxis -set noyzeroaxis -set nokey -set term postscript eps - -!echo " Making <2d.zoom_in.grid.eps>" -set output "results/2d.zoom_in.grid.eps" -plot "results/2d.zoom_in.grid" - -!echo " Making <2d.ball.grid.eps>" -set output "results/2d.ball.grid.eps" -plot "results/2d.ball.grid" - -!echo " Making <2d.curved_line.grid.eps>" -set output "results/2d.curved_line.grid.eps" -plot "results/2d.curved_line.grid" - -!echo " Making <2d.random.grid.eps>" -set output "results/2d.random.grid.eps" -plot "results/2d.random.grid" - - - - -!echo " Making <3d.zoom_in.grid.eps>" -set output "results/3d.zoom_in.grid.eps" -splot "results/3d.zoom_in.grid" - -!echo " Making <3d.ball.grid.eps>" -set output "results/3d.ball.grid.eps" -splot "results/3d.ball.grid" - -!echo " Making <3d.curved_line.grid.eps>" -set output "results/3d.curved_line.grid.eps" -splot "results/3d.curved_line.grid" - -!echo " Making <3d.random.grid.eps>" -set output "results/3d.random.grid.eps" -splot "results/3d.random.grid" - - - - -set data style dots - -!echo " Making <2d.zoom_in.sparsity.eps>" -set output "results/2d.zoom_in.sparsity.eps" -plot "results/2d.zoom_in.sparsity" - -!echo " Making <2d.zoom_in.sparsity.c.eps>" -set output "results/2d.zoom_in.sparsity.c.eps" -plot "results/2d.zoom_in.sparsity.c" - - -!echo " Making <2d.ball.sparsity.eps>" -set output "results/2d.ball.sparsity.eps" -plot "results/2d.ball.sparsity" - -!echo " Making <2d.ball.sparsity.c.eps>" -set output "results/2d.ball.sparsity.c.eps" -plot "results/2d.ball.sparsity.c" - - -!echo " Making <2d.curved_line.sparsity.eps>" -set output "results/2d.curved_line.sparsity.eps" -plot "results/2d.curved_line.sparsity" - -!echo " Making <2d.curved_line.sparsity.c.eps>" -set output "results/2d.curved_line.sparsity.c.eps" -plot "results/2d.curved_line.sparsity.c" - - -!echo " Making <2d.random.sparsity.eps>" -set output "results/2d.random.sparsity.eps" -plot "results/2d.random.sparsity" - -!echo " Making <2d.random.sparsity.c.eps>" -set output "results/2d.random.sparsity.c.eps" -plot "results/2d.random.sparsity.c" - - - -!echo " Making <3d.zoom_in.sparsity.eps>" -set output "results/3d.zoom_in.sparsity.eps" -plot "results/3d.zoom_in.sparsity" - -!echo " Making <3d.zoom_in.sparsity.c.eps>" -set output "results/3d.zoom_in.sparsity.c.eps" -plot "results/3d.zoom_in.sparsity.c" - - -!echo " Making <3d.ball.sparsity.eps>" -set output "results/3d.ball.sparsity.eps" -plot "results/3d.ball.sparsity" - -!echo " Making <3d.ball.sparsity.c.eps>" -set output "results/3d.ball.sparsity.c.eps" -plot "results/3d.ball.sparsity.c" - - -!echo " Making <3d.curved_line.sparsity.eps>" -set output "results/3d.curved_line.sparsity.eps" -plot "results/3d.curved_line.sparsity" - -!echo " Making <3d.curved_line.sparsity.c.eps>" -set output "results/3d.curved_line.sparsity.c.eps" -plot "results/3d.curved_line.sparsity.c" - - -!echo " Making <3d.random.sparsity.eps>" -set output "results/3d.random.sparsity.eps" -plot "results/3d.random.sparsity" - -!echo " Making <3d.random.sparsity.c.eps>" -set output "results/3d.random.sparsity.c.eps" -plot "results/3d.random.sparsity.c" - diff --git a/deal.II/deal.II/Attic/examples/dof/results/.cvsignore b/deal.II/deal.II/Attic/examples/dof/results/.cvsignore deleted file mode 100644 index 
480cb8565d..0000000000 --- a/deal.II/deal.II/Attic/examples/dof/results/.cvsignore +++ /dev/null @@ -1 +0,0 @@ -?d.* diff --git a/deal.II/deal.II/Attic/examples/error-estimation/.cvsignore b/deal.II/deal.II/Attic/examples/error-estimation/.cvsignore deleted file mode 100644 index e3db7764a9..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/.cvsignore +++ /dev/null @@ -1,4 +0,0 @@ -error-estimation -Makefile.dep -*.go -*.o diff --git a/deal.II/deal.II/Attic/examples/error-estimation/Makefile b/deal.II/deal.II/Attic/examples/error-estimation/Makefile deleted file mode 100644 index ed89446ecc..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/Makefile +++ /dev/null @@ -1,172 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998, 1999, 2000 - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = error-estimation - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../.. - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. 
Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - ./$(target) ee.gauss.prm - ./$(target) ee.singular.prm - ./$(target) ee.kink.prm - gnuplot make_ps - - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. 
-%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/error-estimation/ee.gauss.prm b/deal.II/deal.II/Attic/examples/error-estimation/ee.gauss.prm deleted file mode 100644 index 560bd59cf3..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/ee.gauss.prm +++ /dev/null @@ -1,8 +0,0 @@ -set Test case = Gauss shape -set Initial refinement = 2 -set Refinement criterion = { global | true error | estimated error } -set Refinement fraction = 0.3 -set Coarsening fraction = 0.03 -set Maximum cells = 10000 -set Output base filename = data-gauss/ -set Output format = ucd diff --git a/deal.II/deal.II/Attic/examples/error-estimation/ee.kink.prm b/deal.II/deal.II/Attic/examples/error-estimation/ee.kink.prm deleted file mode 100644 index 6bf0b8e88c..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/ee.kink.prm +++ /dev/null @@ -1,8 +0,0 @@ -set Test case = Kink -set Initial refinement = 1 -set Refinement criterion = { global | estimated error } -set Refinement fraction = 0.1 -set Coarsening fraction = 0.02 -set Maximum cells = 100000 -set Output base filename = data-kink/ -set Output format = ucd diff --git a/deal.II/deal.II/Attic/examples/error-estimation/ee.singular.prm b/deal.II/deal.II/Attic/examples/error-estimation/ee.singular.prm deleted file mode 100644 index eb6988553a..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/ee.singular.prm +++ /dev/null @@ -1,8 +0,0 @@ -set Test case = Singular -set Initial refinement = 1 -set Refinement criterion = { global | estimated error } -set Refinement fraction = 0.1 -set Coarsening fraction = 0.02 -set Maximum cells = 100000 -set Output base filename = data-singular/ -set Output format = ucd diff --git a/deal.II/deal.II/Attic/examples/error-estimation/error-estimation.cc b/deal.II/deal.II/Attic/examples/error-estimation/error-estimation.cc deleted file mode 100644 index cd11fc8630..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/error-estimation.cc +++ /dev/null @@ -1,755 +0,0 @@ -/* $Id$ */ -/* Copyright W. 
Bangerth, University of Heidelberg, 1998 */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - - - - -template -class PoissonEquation : public Equation { - public: - PoissonEquation (const Function &rhs) : - Equation(1), - use_coefficient(false), - right_hand_side (rhs), - coefficient (default_coefficient) {}; - - PoissonEquation (const Function &rhs, - const Function &coefficient ) : - Equation(1), - use_coefficient(true), - right_hand_side (rhs), - coefficient (coefficient) {}; - - virtual void assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (FullMatrix &cell_matrix, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - protected: - const bool use_coefficient; - const Function &right_hand_side; - const Function &coefficient; - - static const ConstantFunction default_coefficient; -}; - - -const ConstantFunction<2> PoissonEquation<2>::default_coefficient(1); - - - - - -template -class PoissonProblem : public ProblemBase, public MultipleParameterLoop::UserClass { - public: - enum RefineMode { - global, true_error, error_estimator - }; - - PoissonProblem (); - ~PoissonProblem (); - - void clear (); - void create_new (const unsigned int); - void declare_parameters (ParameterHandler &prm); - void run (ParameterHandler &prm); - void print_history (const ParameterHandler &prm, - const RefineMode refine_mode) const; - - protected: - Triangulation *tria; - DoFHandler *dof; - - Function *rhs; - Function *solution_function; - Function *coefficient; - - Boundary *boundary; - - vector l2_error, linfty_error; - vector h1_error, estimated_error; - vector n_dofs; -}; - - - - - -template -class Solution { - public: - - class GaussShape : public Function { - public: - virtual double value (const Point &p, - const unsigned int component) const; - virtual Tensor<1,dim> gradient (const Point &p, - const unsigned int component) const; - }; - - class Singular : public Function { - public: - virtual double value (const Point &p, - const unsigned int component) const; - virtual Tensor<1,dim> gradient (const Point &p, - const unsigned int component) const; - }; - - class Kink : public Function { - public: - class Coefficient : public Function { - public: - virtual double value (const Point &p, - const unsigned int component) const; - }; - - virtual double value (const Point &p, - const unsigned int component) const; - virtual Tensor<1,dim> gradient (const Point &p, - const unsigned int component) const; - }; -}; - - - - -template -class RHS { - public: - - /** - * Right hand side constructed such that - * the exact solution is - * $x*y*exp(-(x**2+y**2)*40)$. - */ - class GaussShape : public Function { - public: - virtual double value (const Point &p, - const unsigned int component) const; - }; - - /** - * Right hand side constructed such that - * the exact solution is - * $r^{2/3}$. - */ - class Singular : public Function { - public: - virtual double value (const Point &p, - const unsigned int component) const; - }; - - /** - * Right hand side constructed such that - * the exact solution is - * $(1+4\theta(f))*f$ with - * $f=y-x**2$. 
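// ---------------------------------------------------------------------------
// Sanity check of the manufactured solutions quoted in the comments above
// (a minimal standalone sketch using only the standard library, independent
// of deal.II; the helper names below are ad hoc and chosen only for this
// check):
//
//  * Gauss shape:  u = x*y*exp(-40*(x^2+y^2))  gives
//                  -laplace(u) = (480 - 6400*(x^2+y^2))*x*y*exp(-40*(x^2+y^2)),
//                  which is the expression RHS<2>::GaussShape::value returns
//                  below.
//  * Singular:     u = (x^2+y^2)^{1/3} = r^{2/3}  gives
//                  grad u = 2/3*(x^2+y^2)^{-2/3}*(x,y)  and
//                  -laplace(u) = -4/9*(x^2+y^2)^{-2/3}.
//  * Kink:         with s = y-x^2, u = (1+4*theta(s))*s and the coefficient
//                  a = 1/(1+4*theta(s)), the flux a*grad(u) = (-2x,1) is
//                  continuous across s=0 and -div(a*grad u) = 2, the constant
//                  returned by RHS<2>::Kink::value.
//
// The first two identities can also be checked numerically with a central
// finite-difference Laplacian:

#include <cmath>
#include <cstdio>

namespace manufactured_check
{
  double u_gauss (double x, double y)    { return x*y*std::exp(-40.*(x*x+y*y)); }
  double f_gauss (double x, double y)    { const double r2 = x*x+y*y;
                                           return (480.-6400.*r2)*x*y*std::exp(-40.*r2); }
  double u_singular (double x, double y) { return std::pow(x*x+y*y, 1./3.); }
  double f_singular (double x, double y) { return -4./9.*std::pow(x*x+y*y, -2./3.); }

  // second order central-difference approximation of -laplace(u) at (x,y)
  double minus_laplace (double (*u)(double,double),
                        double x, double y, double h = 1e-3)
  {
    return -(u(x+h,y) + u(x-h,y) + u(x,y+h) + u(x,y-h) - 4.*u(x,y)) / (h*h);
  }
}

int main ()
{
  using namespace manufactured_check;
  const double points[3][2] = { {0.1,0.2}, {-0.3,0.15}, {0.25,-0.05} };
  for (unsigned int i=0; i<3; ++i)
    {
      const double x = points[i][0], y = points[i][1];
      // the two columns agree up to the O(h^2) discretization error
      std::printf ("gauss:    fd=%10.5f  exact=%10.5f\n",
                   minus_laplace (u_gauss, x, y),    f_gauss (x, y));
      std::printf ("singular: fd=%10.5f  exact=%10.5f\n",
                   minus_laplace (u_singular, x, y), f_singular (x, y));
    }
  return 0;
}
// ---------------------------------------------------------------------------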
- */ - class Kink : public Function { - public: - virtual double value (const Point &p, - const unsigned int component) const; - }; -}; - - - - -template <> -double Solution<2>::GaussShape::value (const Point<2> &p, - const unsigned int) const { - return p(0)*p(1)*exp(-40*p.square()); -}; - - -template <> -Tensor<1,2> Solution<2>::GaussShape::gradient (const Point<2> &p, - const unsigned int) const { - return Point<2> ((1-80.*p(0)*p(0))*p(1)*exp(-40*p.square()), - (1-80.*p(1)*p(1))*p(0)*exp(-40*p.square())); -}; - - - -template <> -double Solution<2>::Singular::value (const Point<2> &p, - const unsigned int) const { - return pow(p.square(), 1./3.); -}; - - -template <> -Tensor<1,2> Solution<2>::Singular::gradient (const Point<2> &p, - const unsigned int) const { - return 2./3.*pow(p.square(), -2./3.) * p; -}; - - - - -inline double theta(const double x) { - return (x>0 ? 1 : 0); -}; - - - -template <> -double Solution<2>::Kink::value (const Point<2> &p, - const unsigned int) const { - const double s = p(1)-p(0)*p(0); - return (1+4*theta(s))*s; -}; - - -template <> -Tensor<1,2> Solution<2>::Kink::gradient (const Point<2> &p, - const unsigned int) const { - const double s = p(1)-p(0)*p(0); - return (1+4*theta(s))*Point<2>(-2*p(0),1); -}; - - -template <> -double Solution<2>::Kink::Coefficient::value (const Point<2> &p, - const unsigned int) const { - const double s = p(1)-p(0)*p(0); - return 1./(1.+4.*theta(s)); -}; - - - -template <> -double RHS<2>::GaussShape::value (const Point<2> &p, - const unsigned int) const { - return (480.-6400.*p.square())*p(0)*p(1)*exp(-40.*p.square()); -}; - - -template <> -double RHS<2>::Singular::value (const Point<2> &p, - const unsigned int) const { - return -4./9. * pow(p.square(), -2./3.); -}; - - -template <> -double RHS<2>::Kink::value (const Point<2> &, - const unsigned int) const { - return 2; -}; - - - - - - - - -template <> -void PoissonEquation<2>::assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues<2> &fe_values, - const DoFHandler<2>::cell_iterator &) const { - for (unsigned int point=0; point -void PoissonEquation::assemble (FullMatrix &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - -template -void PoissonEquation::assemble (Vector &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - - - - - - - -template -PoissonProblem::PoissonProblem () : - tria(0), dof(0), rhs(0), - solution_function(0), coefficient(0), - boundary(0) {}; - - - -template -PoissonProblem::~PoissonProblem () -{ - clear (); -}; - - - -template -void PoissonProblem::clear () { - if (dof != 0) { delete dof; dof = 0; }; - if (tria != 0) { delete tria; tria = 0; }; - if (rhs != 0) { delete rhs; rhs = 0; }; - if (solution_function != 0) { delete solution_function; solution_function = 0; }; - if (coefficient != 0) { delete coefficient; coefficient = 0; }; - if (boundary != 0) { delete boundary; boundary = 0; }; - - // make it known to the underlying - // ProblemBase that tria and dof - // are already deleted - set_tria_and_dof (tria, dof); - - l2_error.clear (); - linfty_error.clear (); - h1_error.clear (); - estimated_error.clear(); - n_dofs.clear (); - - ProblemBase::clear (); -}; - - - - -template -void PoissonProblem::create_new (const unsigned int) { - clear (); - - tria = new Triangulation(); - dof = new DoFHandler (*tria); - set_tria_and_dof (tria, dof); - boundary = new HyperBallBoundary (); -}; - - - -template 
-void PoissonProblem::declare_parameters (ParameterHandler &prm) { - prm.declare_entry ("Test case", "Gauss shape", - Patterns::Selection("Gauss shape|Singular|Kink")); - prm.declare_entry ("Initial refinement", "2", - Patterns::Integer()); - prm.declare_entry ("Refinement criterion", "estimated error", - Patterns::Selection("global|true error|estimated error")); - prm.declare_entry ("Refinement fraction", "0.3", - Patterns::Double()); - prm.declare_entry ("Coarsening fraction", "0.1", - Patterns::Double()); - prm.declare_entry ("Maximum cells", "3000", - Patterns::Integer()); - prm.declare_entry ("Output base filename", ""); - prm.declare_entry ("Output format", "ucd", - Patterns::Selection("ucd|gnuplot")); -}; - - - - -template -void PoissonProblem::run (ParameterHandler &prm) { - cout << "=======================================" - << "=======================================" << endl - << "===== Test case: " << prm.get ("Test case") << endl - << "===== Doing computation with refinement criterion: "; - RefineMode refine_mode; - if (prm.get("Refinement criterion")=="global") - refine_mode = global; - else - if (prm.get("Refinement criterion")=="true error") - refine_mode = true_error; - else - if (prm.get("Refinement criterion")=="estimated error") - refine_mode = error_estimator; - else - return; - - switch (refine_mode) - { - case global: - cout << "global"; - break; - case true_error: - cout << "true error"; - break; - case error_estimator: - cout << "error estimator"; - break; - }; - - cout << endl - << "=======================================" - << "=======================================" << endl; - cout << "Making initial grid... " << endl; - const unsigned int start_level(prm.get_integer("Initial refinement")); - tria->set_boundary (0, *boundary); - GridGenerator::hyper_ball (*tria); - tria->refine_global (start_level); - - if (prm.get("Test case")=="Gauss shape") - rhs = new RHS::GaussShape(); - else - if (prm.get("Test case")=="Singular") - rhs = new RHS::Singular(); - else - if (prm.get("Test case")=="Kink") - rhs = new RHS::Kink(); - - if (prm.get("Test case")=="Gauss shape") - solution_function = new Solution::GaussShape (); - else - if (prm.get("Test case")=="Singular") - solution_function = new Solution::Singular (); - else - if (prm.get("Test case")=="Kink") - solution_function = new Solution::Kink (); - - - FEQ1 fe; - QGauss3 quadrature; - PoissonEquation *equation; - - static Solution::Kink::Coefficient kink_coefficient; - if (prm.get("Test case")=="Kink") - equation = new PoissonEquation(*rhs, kink_coefficient); - else - equation = new PoissonEquation(*rhs); - - SolutionTransfer solution_transfer (*dof_handler); - - unsigned int refine_step = 0; - const unsigned int max_cells = prm.get_integer("Maximum cells"); - while (tria->n_active_cells() < max_cells) - { - Vector old_solution = solution; - cout << "Refinement step " << refine_step - << ", using " << tria->n_active_cells() << " active cells on " - << tria->n_levels() << " levels." - << endl; - cout << " Distributing dofs... "; - dof->distribute_dofs (fe); - cout << dof->n_dofs() << " degrees of freedom." << endl; - n_dofs.push_back (dof->n_dofs()); - - cout << " Assembling matrices..." 
<< endl; - UpdateFlags update_flags = UpdateFlags(update_values | update_q_points | - update_gradients | update_JxW_values); - - ProblemBase::FunctionMap dirichlet_bc; - dirichlet_bc[0] = solution_function; - assemble (*equation, quadrature, update_flags, dirichlet_bc); - - // if we have an old solution lying - // around, use it to preset the solution - // vector. this reduced the quired - // number of iterations by about - // 10 per cent - if (refine_step != 0) - { - solution.reinit (dof_handler->n_dofs()); - solution_transfer.interpolate (old_solution, solution); - - // if you don't want to preset - // the solution vector, - // uncomment the following - // line and comment out the - // preceding one -// solution.reinit (dof_handler->n_dofs()); - - solution_transfer.clear (); - }; - - cout << " Solving..." << endl; - - solve (); - - - Vector l2_error_per_cell, linfty_error_per_cell, h1_error_per_cell; - Vector estimated_error_per_cell; - QGauss3 q; - - cout << " Calculating L2 error... "; - VectorTools::integrate_difference (*dof_handler, - solution, *solution_function, - l2_error_per_cell, q, - L2_norm); - cout << l2_error_per_cell.l2_norm() << endl; - l2_error.push_back (l2_error_per_cell.l2_norm()); - - cout << " Calculating L-infinity error... "; - VectorTools::integrate_difference (*dof_handler, - solution, *solution_function, - linfty_error_per_cell, q, - Linfty_norm); - cout << linfty_error_per_cell.linfty_norm() << endl; - linfty_error.push_back (linfty_error_per_cell.linfty_norm()); - - cout << " Calculating H1 error... "; - VectorTools::integrate_difference (*dof_handler, - solution, *solution_function, - h1_error_per_cell, q, - H1_norm); - cout << h1_error_per_cell.l2_norm() << endl; - h1_error.push_back (h1_error_per_cell.l2_norm()); - - cout << " Estimating H1 error... "; - - QSimpson eq; - KellyErrorEstimator::estimate (*dof, eq, - KellyErrorEstimator::FunctionMap(), - solution, - estimated_error_per_cell, - vector(), // all components - ((prm.get("Test case")=="Kink") ? 
- &kink_coefficient : 0 )); - cout << estimated_error_per_cell.l2_norm() << endl; - estimated_error.push_back (estimated_error_per_cell.l2_norm()); - - Vector l2_error_per_dof(dof->n_dofs()), linfty_error_per_dof(dof->n_dofs()); - Vector h1_error_per_dof(dof->n_dofs()), estimated_error_per_dof(dof->n_dofs()); - Vector error_ratio (dof->n_dofs()); - DoFTools::distribute_cell_to_dof_vector (*dof, l2_error_per_cell, l2_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, linfty_error_per_cell, - linfty_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, h1_error_per_cell, h1_error_per_dof); - DoFTools::distribute_cell_to_dof_vector (*dof, estimated_error_per_cell, - estimated_error_per_dof); - error_ratio.ratio (h1_error_per_dof, estimated_error_per_dof); - - DataOut out; - fill_data (out); - out.add_data_vector (l2_error_per_dof, "L2_Error"); - out.add_data_vector (linfty_error_per_dof, "Linfty_Error"); - out.add_data_vector (h1_error_per_dof, "H1_Error"); - out.add_data_vector (estimated_error_per_dof, "Estimated_Error"); - out.add_data_vector (error_ratio, "Ratio_True_to_Estimated_Error"); - out.build_patches (); - string filename = prm.get ("Output base filename"); - switch (refine_mode) - { - case global: - filename += "global."; - break; - case true_error: - filename += "true_error."; - break; - case error_estimator: - filename += "estimated_error."; - break; - }; - filename += ('0'+(start_level+refine_step)/10); - filename += ('0'+(start_level+refine_step)%10); - - if (prm.get("Output format")=="ucd") - filename += ".inp"; - else - if (prm.get("Output format")=="gnuplot") - filename += ".gnuplot"; - - cout << " Writing error plots to <" << filename << ">..." << endl; - ofstream outfile(filename.c_str()); - if (prm.get("Output format")=="ucd") - out.write_ucd (outfile); - else - if (prm.get("Output format")=="gnuplot") - out.write_gnuplot (outfile); - - outfile.close(); - - cout << " Refining triangulation..."; - switch (refine_mode) - { - case global: - tria->set_all_refine_flags (); - break; - case true_error: - tria->refine_and_coarsen_fixed_number (h1_error_per_cell, - prm.get_double("Refinement fraction"), - prm.get_double("Coarsening fraction")); - break; - case error_estimator: - tria->refine_and_coarsen_fixed_number (estimated_error_per_cell, - prm.get_double("Refinement fraction"), - prm.get_double("Coarsening fraction")); - break; - }; - - tria->prepare_coarsening_and_refinement (); - solution_transfer.prepare_for_coarsening_and_refinement (solution); - tria->execute_coarsening_and_refinement (); - - cout << endl << endl; - ++refine_step; - }; - - string filename = prm.get ("Output base filename"); - switch (refine_mode) - { - case global: - filename += "global."; - break; - case true_error: - filename += "true_error."; - break; - case error_estimator: - filename += "estimated_error."; - break; - }; - - cout << endl; - - filename += "finest_mesh.gnuplot"; - cout << " Writing finest grid to <" << filename << ">... 
" << endl; - ofstream finest_mesh (filename.c_str()); - GridOut().write_gnuplot (*tria, finest_mesh); - finest_mesh.close(); - - print_history (prm, refine_mode); - cout << endl << endl << endl; - - dof->clear (); - delete equation; -}; - - -template -void PoissonProblem::print_history (const ParameterHandler &prm, - const RefineMode refine_mode) const { - string filename(prm.get("Output base filename")); - filename += "history."; - switch (refine_mode) - { - case global: - filename += "global."; - break; - case true_error: - filename += "true_error."; - break; - case error_estimator: - filename += "estimated_error."; - break; - }; - filename += "gnuplot"; - - cout << endl << "Printing convergence history to <" << filename << ">..." - << endl; - ofstream out(filename.c_str()); - out << "# n_dofs l2_error linfty_error " - << "h1_error estimated_error" - << endl; - for (unsigned int i=0; ih/2:" << endl; - cout << " L2 error : " << 1./average_l2 << endl - << " Linfty error : " << 1./average_linfty << endl - << " H1 error : " << 1./average_h1 << endl - << " Estimated error : " << 1./average_est << endl; -}; - - - - -int main (int argc, char **argv) { - if (argc!=2) - { - cout << "Usage: error-estimation parameterfile" << endl << endl; - return 1; - }; - - PoissonProblem<2> poisson; - MultipleParameterLoop input_data; - - poisson.declare_parameters(input_data); - input_data.read_input (argv[1]); - input_data.loop (poisson); - - return 0; -}; - - - diff --git a/deal.II/deal.II/Attic/examples/error-estimation/make_ps b/deal.II/deal.II/Attic/examples/error-estimation/make_ps deleted file mode 100644 index 5c06b6c8f2..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/make_ps +++ /dev/null @@ -1,94 +0,0 @@ -set xlabel "Number of degrees of freedom" -set ylabel "Error" -set data style linespoints -set logscale xy - -set term postscript eps - - -set output "data-gauss/history.global.eps" - -plot "data-gauss/history.global.gnuplot" using 1:2 title "L2 error","data-gauss/history.global.gnuplot" using 1:3 title "Linfty error","data-gauss/history.global.gnuplot" using 1:4 title "H1 error","data-gauss/history.global.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-gauss/history.true_error.eps" - -plot "data-gauss/history.true_error.gnuplot" using 1:2 title "L2 error","data-gauss/history.true_error.gnuplot" using 1:3 title "Linfty error","data-gauss/history.true_error.gnuplot" using 1:4 title "H1 error","data-gauss/history.true_error.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-gauss/history.estimated_error.eps" - -plot "data-gauss/history.estimated_error.gnuplot" using 1:2 title "L2 error","data-gauss/history.estimated_error.gnuplot" using 1:3 title "Linfty error","data-gauss/history.estimated_error.gnuplot" using 1:4 title "H1 error","data-gauss/history.estimated_error.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-gauss/history.compare.eps" -plot "data-gauss/history.global.gnuplot" using 1:2 title "global refinement -- L2 error", "data-gauss/history.true_error.gnuplot" using 1:2 title "ref. by true error -- L2 error", "data-gauss/history.estimated_error.gnuplot" using 1:2 title "ref. by estimated error -- L2 error", 0.1/sqrt(x) title "O(h)", "data-gauss/history.global.gnuplot" using 1:4 title "global refinement -- H1 error", "data-gauss/history.true_error.gnuplot" using 1:4 title "ref. by true error -- H1 error", "data-gauss/history.estimated_error.gnuplot" using 1:4 title "ref. 
by estimated error -- H1 error", 0.04/x title "O(h^2)" - - - - - -set output "data-singular/history.global.eps" - -plot "data-singular/history.global.gnuplot" using 1:2 title "L2 error","data-singular/history.global.gnuplot" using 1:3 title "Linfty error","data-singular/history.global.gnuplot" using 1:4 title "H1 error","data-singular/history.global.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-singular/history.estimated_error.eps" - -plot "data-singular/history.estimated_error.gnuplot" using 1:2 title "L2 error","data-singular/history.estimated_error.gnuplot" using 1:3 title "Linfty error","data-singular/history.estimated_error.gnuplot" using 1:4 title "H1 error","data-singular/history.estimated_error.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-singular/history.compare.eps" -plot "data-singular/history.global.gnuplot" using 1:2 title "global refinement -- L2 error", "data-singular/history.estimated_error.gnuplot" using 1:2 title "ref. by estimated error -- L2 error", 1.1/x**0.33 title "O(h^2/3)", 2./sqrt(x) title "O(h)", "data-singular/history.global.gnuplot" using 1:4 title "global refinement -- H1 error", "data-singular/history.estimated_error.gnuplot" using 1:4 title "ref. by estimated error -- H1 error", 0.2/x**0.4 title "O(h^0.8)", 4./x title "O(h^2)" - - - - - - - -set output "data-kink/history.global.eps" - -plot "data-kink/history.global.gnuplot" using 1:2 title "L2 error","data-kink/history.global.gnuplot" using 1:3 title "Linfty error","data-kink/history.global.gnuplot" using 1:4 title "H1 error","data-kink/history.global.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-kink/history.estimated_error.eps" - -plot "data-kink/history.estimated_error.gnuplot" using 1:2 title "L2 error","data-kink/history.estimated_error.gnuplot" using 1:3 title "Linfty error","data-kink/history.estimated_error.gnuplot" using 1:4 title "H1 error","data-kink/history.estimated_error.gnuplot" using 1:5 title "Estimated H1 error" - - -set output "data-kink/history.compare.eps" -plot "data-kink/history.global.gnuplot" using 1:2 title "global refinement -- L2 error", "data-kink/history.estimated_error.gnuplot" using 1:2 title "ref. by estimated error -- L2 error", 5/x**0.25 title "O(h^1/2)", 20/x**0.5 title "O(h)", "data-kink/history.global.gnuplot" using 1:4 title "global refinement -- H1 error", "data-kink/history.estimated_error.gnuplot" using 1:4 title "ref. 
by estimated error -- H1 error", 1.5/sqrt(x) title "O(h)", 20/x**0.95 title "O(h^1.8)" - - - - -set parametric -set data style lines -set nologscale xy -set size 0.7,1 - -set output "data-gauss/finest_mesh.global.eps" -plot "data-gauss/global.finest_mesh.gnuplot" title "Finest mesh" - -set output "data-gauss/finest_mesh.true_error.eps" -plot "data-gauss/true_error.finest_mesh.gnuplot" title "Finest mesh" - -set output "data-gauss/finest_mesh.estimated_error.eps" -plot "data-gauss/estimated_error.finest_mesh.gnuplot" title "Finest mesh" - - - -set output "data-singular/finest_mesh.global.eps" -plot "data-singular/global.finest_mesh.gnuplot" title "Finest mesh" - -set output "data-singular/finest_mesh.estimated_error.eps" -plot "data-singular/estimated_error.finest_mesh.gnuplot" title "Finest mesh" - - - -set output "data-kink/finest_mesh.global.eps" -plot "data-kink/global.finest_mesh.gnuplot" title "Finest mesh" - -set output "data-kink/finest_mesh.estimated_error.eps" -plot "data-kink/estimated_error.finest_mesh.gnuplot" title "Finest mesh" diff --git a/deal.II/deal.II/Attic/examples/error-estimation/strip_comments b/deal.II/deal.II/Attic/examples/error-estimation/strip_comments deleted file mode 100755 index 779b6b16c7..0000000000 --- a/deal.II/deal.II/Attic/examples/error-estimation/strip_comments +++ /dev/null @@ -1 +0,0 @@ -perl -pi -e 's/^#.*$\\n//g' data-*/*.inp diff --git a/deal.II/deal.II/Attic/examples/grid/.cvsignore b/deal.II/deal.II/Attic/examples/grid/.cvsignore deleted file mode 100644 index d91582a08e..0000000000 --- a/deal.II/deal.II/Attic/examples/grid/.cvsignore +++ /dev/null @@ -1,4 +0,0 @@ -grid_test -Makefile.dep -*.go -*.o diff --git a/deal.II/deal.II/Attic/examples/grid/Makefile b/deal.II/deal.II/Attic/examples/grid/Makefile deleted file mode 100644 index 455e443c64..0000000000 --- a/deal.II/deal.II/Attic/examples/grid/Makefile +++ /dev/null @@ -1,178 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998, 1999, 2000 - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = grid_test - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../.. - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. 
-# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-deal2-3d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-deal2-3d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - ./$(target) 2 1 - ./$(target) 2 2 - ./$(target) 2 3 - ./$(target) 2 4 - ./$(target) 3 1 - ./$(target) 3 2 - ./$(target) 3 3 - ./$(target) 3 4 - - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. 
-%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/grid/grid_test.cc b/deal.II/deal.II/Attic/examples/grid/grid_test.cc deleted file mode 100644 index c5d4c0800a..0000000000 --- a/deal.II/deal.II/Attic/examples/grid/grid_test.cc +++ /dev/null @@ -1,329 +0,0 @@ -/* $Id$ */ -/* Copyright W. Bangerth, University of Heidelberg, 1998 */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - - -// 1: continuous refinement of the unit square always in the middle -// 2: refinement of the circle at the boundary -// 2: refinement of a wiggled area at the boundary -// 4: random refinement - - - - - -template -class Ball : - public StraightBoundary { - public: - virtual Point - get_new_point_on_line (const typename Triangulation::line_iterator &line) const { - Point middle = StraightBoundary::get_new_point_on_line(line); - - for (int i=0; i - get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const { - Point middle = StraightBoundary::get_new_point_on_quad(quad); - - for (int i=0; i -class CurvedLine : - public StraightBoundary { - public: - virtual Point - get_new_point_on_line (const typename Triangulation::line_iterator &line) const; - - virtual Point - get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const; -}; - - - -template -Point -CurvedLine::get_new_point_on_line (const typename Triangulation::line_iterator &line) const -{ - Point middle = StraightBoundary::get_new_point_on_line (line); - - // if the line is at the top of bottom - // face: do a special treatment on - // this line. Note that if the - // z-value of the midpoint is either - // 0 or 1, then the z-values of all - // vertices of the line is like that - if (dim>=3) - if (((middle(2) == 0) || (middle(2) == 1)) - // find out, if the line is in the - // interior of the top or bottom face - // of the domain, or at the edge. - // lines at the edge need to undergo - // the usual treatment, while for - // interior lines taking the midpoint - // is sufficient - // - // note: the trick with the boundary - // id was invented after the above was - // written, so we are not very strict - // here with using these flags - && (line->boundary_indicator() == 1)) - return middle; - - - double x=middle(0), - y=middle(1); - - if (y -Point -CurvedLine::get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const -{ - Point middle = StraightBoundary::get_new_point_on_quad (quad); - - // if the face is at the top of bottom - // face: do not move the midpoint in - // x/y direction. Note that if the - // z-value of the midpoint is either - // 0 or 1, then the z-values of all - // vertices of the quad is like that - if ((middle(2) == 0) || (middle(2) == 1)) - return middle; - - double x=middle(0), - y=middle(1); - - if (y -void test (const int test_case) { - cout << "Running testcase " << test_case - << " in " << dim << " dimensions." << endl; - Triangulation tria; - GridGenerator::hyper_cube(tria); - - if ((dim==1) && ((test_case==2) || (test_case==3))) - { - cout << "Impossible for this dimension." 
<< endl; - return; - }; - - - switch (test_case) - { - case 1: - { - // we want to log the - // refinement history -// ofstream history ("mesh.history"); - - // refine first cell - tria.begin_active()->set_refine_flag(); -// tria.save_refine_flags (history); - tria.execute_coarsening_and_refinement (); - - // refine first active cell - // on coarsest level - tria.begin_active()->set_refine_flag (); -// tria.save_refine_flags (history); - tria.execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell; - for (int i=0; i<17; ++i) - { - // refine the presently - // second last cell 17 - // times - cell = tria.last_active(tria.n_levels()-1); - --cell; - cell->set_refine_flag (); -// tria.save_refine_flags (history); - tria.execute_coarsening_and_refinement (); - }; - -// tria.refine_global (5); - - break; - } - - case 2: - case 3: - { - if (dim==3) - { - tria.begin_active()->face(2)->set_boundary_indicator(1); - tria.begin_active()->face(4)->set_boundary_indicator(1); - }; - - - // set the boundary function - Ball ball; - CurvedLine curved_line; - if (test_case==2) - { - tria.set_boundary (0, ball); - tria.set_boundary (1, ball); - } else { - tria.set_boundary (0, curved_line); - tria.set_boundary (1, curved_line); - }; - - // refine once - tria.begin_active()->set_refine_flag(); - tria.execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell, endc; - const unsigned int steps[4] = { 0, 10, 7, 2 }; - for (unsigned int i=0; iat_boundary()) - cell->set_refine_flag(); - - tria.execute_coarsening_and_refinement(); - }; - - tria.set_boundary (0); - tria.set_boundary (1); - - break; - } - - case 4: - { - // refine once - tria.begin_active()->set_refine_flag(); - tria.execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell, endc; - for (int i=0; i<(dim==2 ? 13 : (dim==3 ? 
7 : 30)); ++i) - { - int n_levels = tria.n_levels(); - cell = tria.begin_active(); - endc = tria.end(); - - for (; cell!=endc; ++cell) - { - double r = rand()*1.0/RAND_MAX, - weight = 1.* - (cell->level()*cell->level()) / - (n_levels*n_levels); - - if (r <= 0.5*weight) - cell->set_refine_flag (); - }; - - tria.execute_coarsening_and_refinement (); - }; - break; - } - }; - - - - // output the grid - string filename("results/"); - filename += ('0'+dim); - filename += "d."; - filename += ('0'+test_case); - filename += ".eps"; - - ofstream out(filename.c_str()); - GridOut grid_out; - GridOut::EpsFlags<3> eps_flags; - eps_flags.azimut_angle += 20; - eps_flags.turn_angle += 20; - grid_out.set_flags (eps_flags); - grid_out.write_eps (tria, out); - - cout << " Total number of cells = " << tria.n_cells() << endl - << " Total number of active cells = " << tria.n_active_cells() << endl; -}; - - - -int main (int argc, char **argv) { - if (argc!=3) - { - cout << "Usage: grid_test dimension testcase" << endl << endl - << "Dimension: 2 or 3" << endl << endl - << "Testcases:" << endl - << " 1: continuous refinement of the unit square/cube always in the middle" << endl - << " 2: refinement of the circle/sphere at the boundary" << endl - << " 3: refinement of a wiggled area at the boundary" << endl - << " 4: random refinement" << endl << endl; - return 1; - }; - - if (argv[1][0] == '2') - test<2> (argv[2][0]-'0'); - else - test<3> (argv[2][0]-'0'); - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/grid/make_ps b/deal.II/deal.II/Attic/examples/grid/make_ps deleted file mode 100644 index 21782a3337..0000000000 --- a/deal.II/deal.II/Attic/examples/grid/make_ps +++ /dev/null @@ -1,43 +0,0 @@ -set size 0.721,1 -set data style lines -set noxtics -set noytics -set noztics -set noxzeroaxis -set noyzeroaxis -#set nozzeroaxis -set nokey -set term postscript eps - -!echo " Making " -set output "results/2d.1.eps" -plot "results/2d.1" - -!echo " Making " -set output "results/2d.2.eps" -plot "results/2d.2" - -!echo " Making " -set output "results/2d.3.eps" -plot "results/2d.3" - -!echo " Making " -set output "results/2d.4.eps" -plot "results/2d.4" - - -!echo " Making " -set output "results/3d.1.eps" -splot "results/3d.1" - -!echo " Making " -set output "results/3d.2.eps" -splot "results/3d.2" - -!echo " Making " -set output "results/3d.3.eps" -splot "results/3d.3" - -!echo " Making " -set output "results/3d.4.eps" -splot "results/3d.4" diff --git a/deal.II/deal.II/Attic/examples/grid/results/.cvsignore b/deal.II/deal.II/Attic/examples/grid/results/.cvsignore deleted file mode 100644 index 480cb8565d..0000000000 --- a/deal.II/deal.II/Attic/examples/grid/results/.cvsignore +++ /dev/null @@ -1 +0,0 @@ -?d.* diff --git a/deal.II/deal.II/Attic/examples/multigrid/.cvsignore b/deal.II/deal.II/Attic/examples/multigrid/.cvsignore deleted file mode 100644 index ab98be5ee1..0000000000 --- a/deal.II/deal.II/Attic/examples/multigrid/.cvsignore +++ /dev/null @@ -1,4 +0,0 @@ -multigrid -Makefile.dep -*.go -*.o diff --git a/deal.II/deal.II/Attic/examples/multigrid/Makefile b/deal.II/deal.II/Attic/examples/multigrid/Makefile deleted file mode 100644 index c0b78a5d49..0000000000 --- a/deal.II/deal.II/Attic/examples/multigrid/Makefile +++ /dev/null @@ -1,169 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998, 1999, 2000 - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. 
It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = multigrid - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../.. - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. 
-$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - gnuplot make_ps - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/multigrid/make_ps b/deal.II/deal.II/Attic/examples/multigrid/make_ps deleted file mode 100644 index 76c13a8624..0000000000 --- a/deal.II/deal.II/Attic/examples/multigrid/make_ps +++ /dev/null @@ -1,52 +0,0 @@ -set term postscript eps -set xlabel "Number of degrees of freedom" -set data style linespoints -set logscale xy - - - -set ylabel "Error" - -set output "criss-cross.eps" - -plot "criss_cross.history" using 1:2 title "L1 error","criss_cross.history" using 1:3 title "L2 error","criss_cross.history" using 1:4 title "Linfty error","criss_cross.history" using 1:5 title "H1 seminorm error","criss_cross.history" using 1:6 title "H1 error" - - - -set output "linear.eps" - -plot "linear.history" using 1:2 title "L1 error","linear.history" using 1:3 title "L2 error","linear.history" using 1:4 title "Linfty error","linear.history" using 1:5 title "H1 seminorm error","linear.history" using 1:6 title "H1 error" - - - -set output "quadratic.eps" - -plot "quadratic.history" using 1:2 title "L1 error","quadratic.history" using 1:3 title "L2 error","quadratic.history" using 1:4 title "Linfty error","quadratic.history" using 1:5 title "H1 seminorm error","quadratic.history" using 1:6 title "H1 error" - - - -set output "cubic.eps" - -plot "cubic.history" using 1:2 title "L1 error","cubic.history" using 1:3 title "L2 error","cubic.history" using 1:4 title "Linfty error","cubic.history" using 1:5 title "H1 seminorm error","cubic.history" using 1:6 title "H1 error" - - - -set output "quartic.eps" - -plot "quartic.history" using 1:2 title "L1 error","quartic.history" using 1:3 title "L2 error","quartic.history" using 1:4 title "Linfty error","quartic.history" using 1:5 title "H1 seminorm error","quartic.history" using 1:6 title "H1 error" - - - -set output "l2error.eps" -set ylabel "L2-error" - -plot "criss_cross.history" using 1:3 title "Criss-cross elements", "linear.history" using 1:3 title "Linear elements", "quadratic.history" using 1:3 title "Quadratic elements", "cubic.history" using 1:3 title 
"Cubic elements", "quartic.history" using 1:3 title "Quartic elements" - - - -set output "h1error.eps" -set ylabel "H1-error" - -plot "criss_cross.history" using 1:6 title "Criss-cross elements", "linear.history" using 1:6 title "Linear elements", "quadratic.history" using 1:6 title "Quadratic elements", "cubic.history" using 1:6 title "Cubic elements", "quartic.history" using 1:6 title "Quartic elements" - - diff --git a/deal.II/deal.II/Attic/examples/multigrid/multigrid.cc b/deal.II/deal.II/Attic/examples/multigrid/multigrid.cc deleted file mode 100644 index efe706d682..0000000000 --- a/deal.II/deal.II/Attic/examples/multigrid/multigrid.cc +++ /dev/null @@ -1,515 +0,0 @@ -/* $Id$ */ -/* Copyright W. Bangerth, University of Heidelberg, 1998 */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include - - - -template -class LaplaceProblem -{ - public: - LaplaceProblem (); - ~LaplaceProblem (); - void run (); - - private: - void setup_system (); - void assemble_system (); - void solve (); - void refine_grid (); - void output_results (const unsigned int cycle) const; - - Triangulation triangulation; - MGDoFHandler mg_dof_handler; - - FEQ1 fe; - - ConstraintMatrix hanging_node_constraints; - - SparsityPattern global_sparsity_pattern; - SparseMatrix global_system_matrix; - - MGLevelObject level_sparsity_patterns; - MGLevelObject > level_system_matrices; - - Vector solution; - Vector system_rhs; -}; - - - -template -class Coefficient : public Function -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; - - virtual void value_list (const vector > &points, - vector &values, - const unsigned int component = 0) const; -}; - - - -template -double Coefficient::value (const Point &p, - const unsigned int) const -{ - if (p.square() < 0.5*0.5) - return 20; - else - return 1; -}; - - - -template -void Coefficient::value_list (const vector > &points, - vector &values, - const unsigned int component) const -{ - const unsigned int n_points = points.size(); - - Assert (values.size() == n_points, - ExcVectorHasWrongSize (values.size(), n_points)); - - Assert (component == 0, - ExcWrongComponent (component, 1)); - - for (unsigned int i=0; i > >matrices; - public: - MGSmootherLAC(MGLevelObject >&); - - virtual void smooth (const unsigned int level, - Vector &u, - const Vector &rhs) const; -}; - - -MGSmootherLAC::MGSmootherLAC(MGLevelObject >& matrix) - : - matrices(&matrix) -{} - - -void -MGSmootherLAC::smooth (const unsigned int level, - Vector &u, - const Vector &rhs) const -{ - SolverControl control(2,1.e-300,false,false); - PrimitiveVectorMemory<> mem; - SolverRichardson<> rich(control, mem); - PreconditionRelaxation<> - prec((*matrices)[level], &SparseMatrix ::template precondition_SSOR, 1.); - - rich.solve((*matrices)[level], u, rhs, prec); -} - - - -template -LaplaceProblem::LaplaceProblem () : - mg_dof_handler (triangulation) -{}; - - - -template -LaplaceProblem::~LaplaceProblem () -{ - mg_dof_handler.clear (); -}; - - - -template -void LaplaceProblem::setup_system () -{ - mg_dof_handler.distribute_dofs (fe); - - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (mg_dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); - 
global_sparsity_pattern.reinit (mg_dof_handler.DoFHandler::n_dofs(), - mg_dof_handler.DoFHandler::n_dofs(), - mg_dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (mg_dof_handler, global_sparsity_pattern); - hanging_node_constraints.condense (global_sparsity_pattern); - global_sparsity_pattern.compress(); - - global_system_matrix.reinit (global_sparsity_pattern); - - solution.reinit (mg_dof_handler.DoFHandler::n_dofs()); - system_rhs.reinit (mg_dof_handler.DoFHandler::n_dofs()); - - - const unsigned int n_levels = triangulation.n_levels(); - level_system_matrices.resize (0, n_levels); - level_sparsity_patterns.resize (0, n_levels); - - for (unsigned int level=0; level -void LaplaceProblem::assemble_system () -{ - const Coefficient coefficient; - - QGauss2 quadrature_formula; - - FEValues fe_values (fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_q_points | - update_JxW_values)); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - vector local_dof_indices (dofs_per_cell); - - // FIX - vector coefficient_values (n_q_points, 1.0); - - // not only active cells - MGDoFHandler::cell_iterator cell = mg_dof_handler.begin(), - endc = mg_dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_matrix.clear (); - cell_rhs.clear (); - - fe_values.reinit (cell); - const FullMatrix - & shape_values = fe_values.get_shape_values(); - const vector > > - & shape_grads = fe_values.get_shape_grads(); - const vector - & JxW_values = fe_values.get_JxW_values(); - const vector > - & q_points = fe_values.get_quadrature_points(); - - // FIX -// coefficient.value_list (q_points, coefficient_values); - - for (unsigned int q_point=0; q_pointget_mg_dof_indices (local_dof_indices); - const unsigned int level = cell->level(); - for (unsigned int i=0; iactive()) - { - cell->get_dof_indices (local_dof_indices); - for (unsigned int i=0; i boundary_values; -// VectorTools::interpolate_boundary_values (mg_dof_handler, -// 0, -// ZeroFunction(), -// boundary_values); -// MatrixTools::apply_boundary_values (boundary_values, -// global_system_matrix, -// solution, -// system_rhs); -}; - - - -template -void LaplaceProblem::solve () -{ - - { - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - - SolverControl coarse_grid_solver_control (1000, 1e-12); - PrimitiveVectorMemory<> coarse_grid_vector_memory; - - SolverCG<> coarse_grid_cg (coarse_grid_solver_control, - coarse_grid_vector_memory); - -// PreconditionRelaxation<> -// coarse_grid_solver_preconditioner(level_system_matrices[level_system_matrices.get_minlevel()], -// &SparseMatrix::template precondition_SSOR, -// 1.2); - PreconditionIdentity coarse_grid_solver_preconditioner; - - MGCoarseGridLACIteration, SparseMatrix, PreconditionIdentity> - coarse_grid_solver (coarse_grid_cg, - level_system_matrices[level_system_matrices.get_minlevel()], - coarse_grid_solver_preconditioner); - - MGSmootherLAC smoother (level_system_matrices); - MGTransferPrebuilt grid_transfer; - grid_transfer.build_matrices (mg_dof_handler); - - Multigrid<2> multigrid (mg_dof_handler, - hanging_node_constraints, - level_sparsity_patterns, - level_system_matrices, - grid_transfer); - - PreconditionMG > - mg_precondition (multigrid, smoother, smoother, coarse_grid_solver); - - 
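// What has been put together above, in rough terms: mg_precondition applies
// one multigrid cycle per preconditioner evaluation inside the outer CG
// iteration that follows. On each level the MGSmootherLAC object performs two
// sweeps of SSOR-preconditioned Richardson iteration, roughly
//   u  <-  u + B_SSOR^{-1} (b - A_level * u),
// residuals and corrections are moved between levels with the transfer
// matrices built by MGTransferPrebuilt, and the coarsest level is solved by
// the unpreconditioned CG solver wrapped in coarse_grid_solver. The second
// block further down solves the same global system with plain
// SSOR-preconditioned CG, so that the two iteration counts printed to the
// screen can be compared directly.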
solution.clear (); - cg.solve (global_system_matrix, solution, system_rhs, - mg_precondition); - - cout << " MG Outer iterations: " << solver_control.last_step() - << endl; - - cout << " MG Total inner iterations: " << coarse_grid_solver_control.last_step() - << endl; - }; - - { - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - - PreconditionRelaxation<> - preconditioner(global_system_matrix, - &SparseMatrix::template precondition_SSOR, - 1.2); - - solution.clear (); - cg.solve (global_system_matrix, solution, system_rhs, - preconditioner); - - cout << " CG Outer iterations: " << solver_control.last_step() - << endl; - }; - - hanging_node_constraints.distribute (solution); -}; - - -template -void LaplaceProblem::refine_grid () -{ - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - KellyErrorEstimator::FunctionMap neumann_boundary; - KellyErrorEstimator::estimate (mg_dof_handler, - QGauss3(), - neumann_boundary, - solution, - estimated_error_per_cell); - - triangulation.refine_and_coarsen_fixed_number (estimated_error_per_cell, - 0.3, 0.03); - triangulation.execute_coarsening_and_refinement (); -}; - - - -template -void LaplaceProblem::output_results (const unsigned int cycle) const -{ - string filename = "grid-"; - filename += ('0' + cycle); - Assert (cycle < 10, ExcInternalError()); - - filename += ".eps"; - ofstream output (filename.c_str()); - - GridOut grid_out; - grid_out.write_eps (triangulation, output); -}; - - - -template -void LaplaceProblem::run () -{ - for (unsigned int cycle=0; cycle<8; ++cycle) - { - cout << "Cycle " << cycle << ':' << endl; - - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation); - triangulation.refine_global (1); - } - else - { - refine_grid (); - }; - - - cout << " Number of active cells: " - << triangulation.n_active_cells() - << endl; - - setup_system (); - - cout << " Number of degrees of freedom: " - << mg_dof_handler.DoFHandler::n_dofs() - << endl; - - assemble_system (); - solve (); - output_results (cycle); - - DataOut::EpsFlags eps_flags; - eps_flags.z_scaling = 4; - - DataOut data_out; - data_out.set_flags (eps_flags); - - data_out.attach_dof_handler (mg_dof_handler); - data_out.add_data_vector (solution, "solution"); - data_out.build_patches (); - - ofstream output ("final-solution.eps"); - data_out.write_eps (output); - }; -}; - - - -int main () -{ - try - { - deallog.depth_console (0); - - LaplaceProblem<2> laplace_problem_2d; - laplace_problem_2d.run (); - } - catch (exception &exc) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Exception on processing: " << endl - << exc.what() << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - return 1; - } - catch (...) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Unknown exception!" << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - return 1; - }; - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/Makefile b/deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/Makefile deleted file mode 100644 index 0010ae461f..0000000000 --- a/deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/Makefile +++ /dev/null @@ -1,133 +0,0 @@ -# $Id$ -# Copyright W. 
Bangerth, University of Heidelberg, 1998 - -# Template for makefiles for the examples subdirectory. In principle, -# everything should be done automatically if you set the target file -# here correctly: -target = nonlinear - -# All dependencies between files should be updated by the included -# file Makefile.dep if necessary. Object files are compiled into -# the archives ./Obj.a and ./Obj.g.a. By default, the debug version -# is used to link. It you don't like that, change the following -# variable to "off" -debug-mode = off - -# If you want your program to be linked with extra object or library -# files, specify them here: -user-libs = - -# To run the program, use "make run"; to give parameters to the program, -# give the parameters to the following variable: -run-parameters = $(target).prm - -# To execute additional action apart from running the program, fill -# in this list: -additional-run-action = gnuplot make_ps - -# To specify which files are to be deleted by "make clean" (apart from -# the usual ones: object files, executables, backups, etc), fill in the -# following list -delete-files = gnuplot* *.eps - - - - -############################################################################### -# Internals - -#deal include base path -D = ../../../.. - -include ../../../Make.global_options - - - -# get lists of files we need -cc-files = $(filter-out *%, $(shell echo *.cc)) -o-files = $(cc-files:.cc=.o) -go-files = $(cc-files:.cc=.go) -h-files = $(filter-out *%, $(shell echo *.h)) -lib-h-files = $(filter-out *%, $(shell echo ../../include/*.h)) - -# list of libraries needed to link with -libs.g = ./Obj.g.a \ - $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs = ./Obj.a \ - $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# check whether we use debug mode or not -ifeq ($(debug-mode),on) -libraries = $(libs.g) -flags = $(CXXFLAGS.g) -endif - -ifeq ($(debug-mode),off) -libraries = $(libs) -flags = $(CXXFLAGS) -endif - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - -# make rule for the target -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ $(user-libs) - -# rule how to run the program -run: $(target) - $(target) $(run-parameters) - $(additional-run-action) - - -# rule to make object files -%.go : %.cc - @echo ============================ Compiling with debugging information: $< - @echo $(CXX) ... -c $< -o $@ - @$(CXX) $(CXXFLAGS.g) -c $< -o $@ -%.o : %.cc - @echo ============================ Compiling with optimization: $< - @echo $(CXX) ... -c $< -o $@ - @$(CXX) $(CXXFLAGS) -c $< -o $@ - - -# rules which files the libraries depend upon -Obj.a: ./Obj.a($(o-files)) -Obj.g.a: ./Obj.g.a($(go-files)) - - -clean: - -rm -f *.o *.go *~ Makefile.dep Obj.a Obj.g.a $(target) $(delete-files) - - - -.PHONY: clean - - -#Rule to generate the dependency file. This file is -#automagically remade whenever needed, i.e. whenever -#one of the cc-/h-files changed. Make detects whether -#to remake this file upon inclusion at the bottom -#of this file. 
-# -#use perl to generate rules for the .go files as well -#as to make rules not for tria.o and the like, but -#rather for libnumerics.a(tria.o) -Makefile.dep: $(cc-files) $(h-files) $(lib-h-files) - @echo ============================ Remaking Makefile - @perl ../../../Make_dep.pl ./Obj $(INCLUDE) $(cc-files) \ - > Makefile.dep - - -include Makefile.dep - diff --git a/deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/nonlinear.cc b/deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/nonlinear.cc deleted file mode 100644 index 30155302fa..0000000000 --- a/deal.II/deal.II/Attic/examples/nonlinear/fixed-point-iteration/nonlinear.cc +++ /dev/null @@ -1,253 +0,0 @@ -/* $Id$ */ -/* Copyright W. Bangerth, University of Heidelberg, 1998 */ - - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - - - - - -template -class RightHandSide : public Function -{ - public: - double value (const Point &p) const - { - double x = 80; - for (unsigned int d=0; d -class PoissonEquation : public Equation { - public: - PoissonEquation (const Function &rhs, - const Vector &last_solution) : - Equation(1), - right_hand_side (rhs), - last_solution(last_solution) {}; - - virtual void assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (FullMatrix &cell_matrix, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - protected: - const Function &right_hand_side; - const Vector &last_solution; -}; - - - - - - -template -class NonlinearProblem : public ProblemBase { - public: - NonlinearProblem (); - void run (); - - protected: - Triangulation *tria; - DoFHandler *dof; - - Vector last_solution; -}; - - - - -template -void PoissonEquation::assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &) const { - const vector > >&gradients = fe_values.get_shape_grads (); - const FullMatrix &values = fe_values.get_shape_values (); - vector rhs_values (fe_values.n_quadrature_points); - const vector &weights = fe_values.get_JxW_values (); - - vector > last_solution_grads(fe_values.n_quadrature_points); - fe_values.get_function_grads (last_solution, last_solution_grads); - - - right_hand_side.value_list (fe_values.get_quadrature_points(), rhs_values); - - for (unsigned int point=0; point -void PoissonEquation::assemble (FullMatrix &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - -template -void PoissonEquation::assemble (Vector &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - - -template -NonlinearProblem::NonlinearProblem () : - tria(0), dof(0) {}; - - - -template -void NonlinearProblem::run () { - - // first reset everything to a virgin state - clear (); - - tria = new Triangulation(); - dof = new DoFHandler (tria); - set_tria_and_dof (tria, dof); - - - RightHandSide rhs; - ZeroFunction boundary_values; - StraightBoundary boundary; - - FELinear fe; - PoissonEquation equation (rhs, last_solution); - QGauss2 quadrature; - - ProblemBase::FunctionMap dirichlet_bc; - dirichlet_bc[0] = &boundary_values; - - - 
GridGenerator::hypercube (*tria); - tria->refine_global (4); - - for (unsigned int refinement_step=0; refinement_step<10; ++refinement_step) - { - cout << "Refinement step " << refinement_step << endl - << " Grid has " << tria->n_active_cells() << " active cells." << endl; - - cout << " Distributing dofs... "; - dof->distribute_dofs (fe); - cout << dof->n_dofs() << " degrees of freedom." << endl; - - // set the starting values for the iteration - // to a constant value of 1 - last_solution.reinit (dof->n_dofs()); - for (unsigned int i=0; in_dofs(); ++i) - last_solution(i) = 1; - - - // here comes the fixed point iteration - for (unsigned int nonlinear_step=0; nonlinear_step<10; ++nonlinear_step) - { - cout << " Nonlinear step " << nonlinear_step << endl; - cout << " Assembling matrices..." << endl; - assemble (equation, quadrature, fe, - UpdateFlags(update_values | update_gradients | - update_JxW_values | update_q_points), - dirichlet_bc); - - cout << " Solving..." << endl; - solve (); - - if (nonlinear_step % 2 == 0) - { - string filename = "nonlinear."; - filename += ('0' + refinement_step); - filename += '.'; - filename += ('0' + (nonlinear_step/2)); - filename += ".gnuplot"; - cout << " Writing to file <" << filename << ">..." << endl; - - DataOut out; - ofstream gnuplot(filename.c_str()); - fill_data (out); - out.write_gnuplot (gnuplot); - gnuplot.close (); - }; - - last_solution = solution; - }; - - Vector error_indicator; - KellyErrorEstimator ee; - QSimpson eq; - ee.estimate_error (*dof, eq, fe, - KellyErrorEstimator::FunctionMap(), - solution, - error_indicator); - tria->refine_and_coarsen_fixed_number (error_indicator, 0.3, 0); - tria->execute_coarsening_and_refinement (); - }; - - - delete dof; - delete tria; - - cout << endl; -}; - - - - -int main () -{ - NonlinearProblem<2> problem; - problem.run (); -}; diff --git a/deal.II/deal.II/Attic/examples/poisson/.cvsignore b/deal.II/deal.II/Attic/examples/poisson/.cvsignore deleted file mode 100644 index 572d49119c..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/.cvsignore +++ /dev/null @@ -1,6 +0,0 @@ -poisson -Makefile.dep -*.go -Makefile.dep -*.go -*.o diff --git a/deal.II/deal.II/Attic/examples/poisson/Makefile b/deal.II/deal.II/Attic/examples/poisson/Makefile deleted file mode 100644 index 2c1e787bc5..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/Makefile +++ /dev/null @@ -1,134 +0,0 @@ -# $Id$ -# Copyright W. Bangerth, University of Heidelberg, 1998 - -# Template for makefiles for the examples subdirectory. In principle, -# everything should be done automatically if you set the target file -# here correctly: -target = poisson - -# All dependencies between files should be updated by the included -# file Makefile.dep if necessary. Object files are compiled into -# the archives ./Obj.a and ./Obj.g.a. By default, the debug version -# is used to link. 
It you don't like that, change the following -# variable to "off" -debug-mode = on - -# If you want your program to be linked with extra object or library -# files, specify them here: -user-libs = - -# To run the program, use "make run"; to give parameters to the program, -# give the parameters to the following variable: -run-parameters = $(target).prm - -# To execute additional action apart from running the program, fill -# in this list: -additional-run-action = cd results ; gnuplot make_ps - -# To specify which files are to be deleted by "make clean" (apart from -# the usual ones: object files, executables, backups, etc), fill in the -# following list -delete-files = results/*gnuplot results/*.eps - - - - -############################################################################### -# Internals - -#deal include base path -D = ../../.. - -include $D/common/Make.global_options - - - -# get lists of files we need -cc-files = $(filter-out *%, $(shell echo *.cc)) -o-files = $(cc-files:.cc=.o) -go-files = $(cc-files:.cc=.go) -h-files = $(filter-out *%, $(shell echo *.h)) -lib-h-files = $(filter-out *%, $(shell echo ../../include/*/*.h)) - -# list of libraries needed to link with -libs.g = ./Obj.g.a \ - $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs = ./Obj.a \ - $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - - -# check whether we use debug mode or not -ifeq ($(debug-mode),on) -libraries = $(libs.g) -flags = $(CXXFLAGS.g) -endif - -ifeq ($(debug-mode),off) -libraries = $(libs) -flags = $(CXXFLAGS) -endif - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - -# make rule for the target -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ $(user-libs) - -# rule how to run the program -run: $(target) - ./$(target) $(run-parameters) - $(additional-run-action) - - -# rule to make object files -%.go : %.cc - @echo ============================ Compiling with debugging information: $< - @echo $(CXX) ... -c $< -o $@ - @$(CXX) $(CXXFLAGS.g) -c $< -o $@ -%.o : %.cc - @echo ============================ Compiling with optimization: $< - @echo $(CXX) ... -c $< -o $@ - @$(CXX) $(CXXFLAGS.o) -c $< -o $@ - - -# rules which files the libraries depend upon -Obj.a: ./Obj.a($(o-files)) -Obj.g.a: ./Obj.g.a($(go-files)) - - -clean: - -rm -f *.o *.go *~ Makefile.dep Obj.a Obj.g.a $(target) $(delete-files) - - - -.PHONY: clean - - -#Rule to generate the dependency file. This file is -#automagically remade whenever needed, i.e. whenever -#one of the cc-/h-files changed. Make detects whether -#to remake this file upon inclusion at the bottom -#of this file. -# -#use perl to generate rules for the .go files as well -#as to make rules not for tria.o and the like, but -#rather for libnumerics.a(tria.o) -Makefile.dep: $(cc-files) $(h-files) $(lib-h-files) - @echo ============================ Remaking Makefile - @perl $D/common/scripts/Make_dep.pl ./Obj $(INCLUDE) $(cc-files) \ - > Makefile.dep - - -include Makefile.dep - diff --git a/deal.II/deal.II/Attic/examples/poisson/equation.cc b/deal.II/deal.II/Attic/examples/poisson/equation.cc deleted file mode 100644 index 9610c7e888..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/equation.cc +++ /dev/null @@ -1,86 +0,0 @@ -/* $Id$ */ -/* Copyright W. 
Bangerth, University of Heidelberg, 1998 */ - - -#include "poisson.h" -#include - - - -#if deal_II_dimension == 1 - -template <> -void PoissonEquation<1>::assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues<1> &fe_values, - const DoFHandler<1>::cell_iterator &) const { - for (unsigned int point=0; point= 2 - -template -void PoissonEquation::assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &) const { - const vector > >&gradients = fe_values.get_shape_grads (); - const FullMatrix &values = fe_values.get_shape_values (); - vector rhs_values (fe_values.n_quadrature_points); - const vector &weights = fe_values.get_JxW_values (); - - right_hand_side.value_list (fe_values.get_quadrature_points(), rhs_values); - - for (unsigned int point=0; point -void PoissonEquation::assemble (FullMatrix &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - -template -void PoissonEquation::assemble (Vector &, - const FEValues &, - const DoFHandler::cell_iterator &) const { - Assert (false, ExcPureVirtualFunctionCalled()); -}; - - - - - - -template class PoissonEquation<2>; diff --git a/deal.II/deal.II/Attic/examples/poisson/poisson.cc b/deal.II/deal.II/Attic/examples/poisson/poisson.cc deleted file mode 100644 index bbfa7841ea..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/poisson.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* $Id$ */ -/* Copyright W. Bangerth, University of Heidelberg, 1998 */ - - - -#include "poisson.h" -#include -#include - - -int main (int argc, char **argv) { - if (argc!=2) - { - cout << "Usage: poisson parameterfile" << endl << endl; - return 1; - }; - - // no additional output to console - deallog.depth_console (0); - - PoissonProblem<2> poisson; - MultipleParameterLoop input_data; - - poisson.declare_parameters(input_data); - input_data.read_input (argv[1]); - input_data.loop (poisson); - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/poisson/poisson.h b/deal.II/deal.II/Attic/examples/poisson/poisson.h deleted file mode 100644 index ece30b3451..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/poisson.h +++ /dev/null @@ -1,101 +0,0 @@ -/*---------------------------- poisson.h ---------------------------*/ -/* $Id$ */ -/* Copyright W. 
Bangerth, University of Heidelberg, 1998 */ -#ifndef __poisson_H -#define __poisson_H -/*---------------------------- poisson.h ---------------------------*/ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include -#include -#include -#include - - - - - - - -template -class PoissonEquation : public Equation { - public: - PoissonEquation (const Function &rhs) : - Equation(1), - right_hand_side (rhs) {}; - - virtual void assemble (FullMatrix &cell_matrix, - Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (FullMatrix &cell_matrix, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - virtual void assemble (Vector &rhs, - const FEValues &fe_values, - const DoFHandler::cell_iterator &cell) const; - protected: - const Function &right_hand_side; -}; - - - - - - -template -class PoissonProblem : public ProblemBase, - public MultipleParameterLoop::UserClass { - public: - PoissonProblem (); - virtual ~PoissonProblem(); - - void clear (); - - virtual void create_new (const unsigned int run_no); - virtual void declare_parameters (ParameterHandler &prm); - virtual void run (ParameterHandler &prm); - - - bool make_grid (ParameterHandler &prm); - void make_zoom_in_grid (); - void make_random_grid (); - - bool set_right_hand_side (ParameterHandler &prm); - bool set_boundary_values (ParameterHandler &prm); - - protected: - Triangulation *tria; - DoFHandler *dof; - - Function *rhs; - Function *boundary_values; - - Boundary *boundary; -}; - - - - - -/*---------------------------- poisson.h ---------------------------*/ -/* end of #ifndef __poisson_H */ -#endif -/*---------------------------- poisson.h ---------------------------*/ diff --git a/deal.II/deal.II/Attic/examples/poisson/poisson.prm b/deal.II/deal.II/Attic/examples/poisson/poisson.prm deleted file mode 100644 index 434c6ff65f..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/poisson.prm +++ /dev/null @@ -1,5 +0,0 @@ -set Test run = { zoom in | ball | curved line | random | jump | L-region | slit domain} -set Global refinement = {{ 2 | 5 | 6 | 0 | 3 | 5 | 5 }} -set Right hand side = {{ zero | zero | trigpoly | constant | zero | zero | poly }} -set Boundary values = {{ sine | sine | zero | zero | jump | sine | sine }} -set Output file = results/{{ zoom_in | ball | curved_line | random | jump | L-region | slit_domain }}.gnuplot diff --git a/deal.II/deal.II/Attic/examples/poisson/problem.cc b/deal.II/deal.II/Attic/examples/poisson/problem.cc deleted file mode 100644 index b5de827b16..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/problem.cc +++ /dev/null @@ -1,622 +0,0 @@ -/* $Id$ */ -/* Copyright W. Bangerth, University of Heidelberg, 1998 */ - - - -#include "poisson.h" -#include -#include -#include -#include - - -template -class BoundaryValuesSine : public Function { - public: - /** - * Return the value of the function - * at the given point. 
- */ - virtual double value (const Point &p, - const unsigned int component) const { - Assert (component==0, ExcIndexRange (component, 0, 1)); - - double x = 1; - - for (unsigned int i=0; i &p, - Vector &values) const { - Assert (values.size()==1, ExcVectorHasWrongSize (values.size(), 1)); - - double x = 1; - - for (unsigned int i=0; i > &points, - vector &values, - const unsigned int component) const { - Assert (values.size() == points.size(), - ExcVectorHasWrongSize(values.size(), points.size())); - for (unsigned int i=0; i::value (points[i], component); - }; -}; - - - -template -class BoundaryValuesJump : public Function { - public: - /** - * Return the value of the function - * at the given point. - */ - virtual double value (const Point &p, - const unsigned int component) const { - Assert (component==0, ExcIndexRange (component, 0, 1)); - switch (dim) - { - case 1: - return 0; - default: - if (p(0) == p(1)) - return 0.5; - else - return (p(0)>p(1) ? 0. : 1.); - }; - }; -}; - - - - -template -class RHSTrigPoly : public Function { - public: - /** - * Return the value of the function - * at the given point. - */ - virtual double value (const Point &p, - const unsigned int) const; -}; - - - -/** - Right hand side constructed such that the exact solution is - $x(1-x)$ in 1d, $x(1-x)*y(1-y)$ in 2d, etc. - */ -template -class RHSPoly : public Function { - public: - /** - * Return the value of the function - * at the given point. - */ - virtual double value (const Point &p, - const unsigned int) const; -}; - - - - - - - - - -template -class CurvedLine : - public StraightBoundary { - public: - virtual Point - get_new_point_on_line (const typename Triangulation::line_iterator &line) const; - - virtual Point - get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const; -}; - - - -template -Point -CurvedLine::get_new_point_on_line (const typename Triangulation::line_iterator &line) const -{ - Point middle = StraightBoundary::get_new_point_on_line (line); - - // if the line is at the top of bottom - // face: do a special treatment on - // this line. Note that if the - // z-value of the midpoint is either - // 0 or 1, then the z-values of all - // vertices of the line is like that - if (dim>=3) - if (((middle(2) == 0) || (middle(2) == 1)) - // find out, if the line is in the - // interior of the top or bottom face - // of the domain, or at the edge. - // lines at the edge need to undergo - // the usual treatment, while for - // interior lines taking the midpoint - // is sufficient - // - // note: the trick with the boundary - // id was invented after the above was - // written, so we are not very strict - // here with using these flags - && (line->boundary_indicator() == 1)) - return middle; - - - double x=middle(0), - y=middle(1); - - if (y -Point -CurvedLine::get_new_point_on_quad (const typename Triangulation::quad_iterator &quad) const -{ - Point middle = StraightBoundary::get_new_point_on_quad (quad); - - // if the face is at the top of bottom - // face: do not move the midpoint in - // x/y direction. 
Note that if the - // z-value of the midpoint is either - // 0 or 1, then the z-values of all - // vertices of the quad is like that - if ((middle(2) == 0) || (middle(2) == 1)) - return middle; - - double x=middle(0), - y=middle(1); - - if (y -double RHSTrigPoly::value (const Point &p, - const unsigned int component) const { - Assert (component==0, ExcIndexRange (component, 0, 1)); - - const double pi = 3.1415926536; - switch (dim) - { - case 1: - return p(0)*p(0)*cos(2*pi*p(0)); - case 2: - return (-2.0*cos(pi*p(0)/2)*p(1)*sin(pi*p(1)) + - 2.0*p(0)*sin(pi*p(0)/2)*pi*p(1)*sin(pi*p(1)) + - 5.0/4.0*p(0)*p(0)*cos(pi*p(0)/2)*pi*pi*p(1)*sin(pi*p(1)) - - 2.0*p(0)*p(0)*cos(pi*p(0)/2)*cos(pi*p(1))*pi); - default: - return 0; - }; -}; - - - -template -double RHSPoly::value (const Point &p, - const unsigned int component) const { - Assert (component==0, ExcIndexRange (component, 0, 1)); - - double ret_val = 0; - for (unsigned int i=0; i -PoissonProblem::PoissonProblem () : - tria(0), dof(0), rhs(0), boundary_values(0), boundary(0) {}; - - - -template -PoissonProblem::~PoissonProblem () -{ - clear (); -}; - - - -template -void PoissonProblem::clear () { - if (dof != 0) { - delete dof; - dof = 0; - }; - - if (boundary != 0) - { - tria->set_boundary (0); - delete boundary; - boundary = 0; - }; - - if (tria != 0) { - delete tria; - tria = 0; - }; - - // make it known to the underlying - // ProblemBase that tria and dof - // are already deleted - set_tria_and_dof (tria, dof); - - - if (rhs != 0) - { - delete rhs; - rhs = 0; - }; - - if (boundary_values != 0) - { - delete boundary_values; - boundary_values = 0; - }; - - ProblemBase::clear (); -}; - - - - -template -void PoissonProblem::create_new (const unsigned int) { - clear (); - - tria = new Triangulation(); - dof = new DoFHandler (*tria); - set_tria_and_dof (tria, dof); -}; - - - - -template -void PoissonProblem::declare_parameters (ParameterHandler &prm) { - if (dim>=2) - prm.declare_entry ("Test run", "zoom in", - Patterns::Selection("tensor|zoom in|ball|curved line|" - "random|jump|L-region|slit domain")); - else - prm.declare_entry ("Test run", "zoom in", - Patterns::Selection("tensor|zoom in|random")); - - prm.declare_entry ("Global refinement", "0", - Patterns::Integer()); - prm.declare_entry ("Right hand side", "zero", - Patterns::Selection("zero|constant|trigpoly|poly")); - prm.declare_entry ("Boundary values", "zero", - Patterns::Selection("zero|sine|jump")); - prm.declare_entry ("Output file", "gnuplot.1"); -}; - - - - -template -bool PoissonProblem::make_grid (ParameterHandler &prm) { - string test = prm.get ("Test run"); - unsigned int test_case; - if (test=="zoom in") test_case = 1; - else - if (test=="ball") test_case = 2; - else - if (test=="curved line") test_case = 3; - else - if (test=="random") test_case = 4; - else - if (test=="tensor") test_case = 5; - else - if (test=="jump") test_case = 6; - else - if (test=="L-region") test_case = 7; - else - if (test=="slit domain") test_case = 8; - else - { - cerr << "This test seems not to be implemented!" 
<< endl; - return false; - }; - - switch (test_case) - { - case 1: - boundary = new StraightBoundary(); - tria->set_boundary (0, *boundary); - make_zoom_in_grid (); - break; - case 2: - // make ball grid around origin with - // unit radius - { - static const Point origin; - boundary = new HyperBallBoundary(origin, 1.); - GridGenerator::hyper_ball (*tria, origin, 1.); - tria->set_boundary (0, *boundary); - break; - }; - case 3: - // set the boundary function - { - boundary = new CurvedLine(); - GridGenerator::hyper_cube (*tria); - tria->set_boundary (0, *boundary); - break; - }; - case 4: - boundary = new StraightBoundary(); - tria->set_boundary (0, *boundary); - make_random_grid (); - break; - case 5: - boundary = new StraightBoundary(); - tria->set_boundary (0, *boundary); - GridGenerator::hyper_cube (*tria); - break; - case 6: - boundary = new StraightBoundary(); - tria->set_boundary (0, *boundary); - GridGenerator::hyper_cube (*tria); - tria->refine_global (1); - for (unsigned int i=0; i<5; ++i) - { - tria->begin_active(tria->n_levels()-1)->set_refine_flag(); - (--(tria->last_active()))->set_refine_flag(); - tria->execute_coarsening_and_refinement (); - }; - break; - case 7: - boundary = new StraightBoundary(); - tria->set_boundary (0, *boundary); - GridGenerator::hyper_L (*tria); - break; - case 8: - boundary = new StraightBoundary(); - tria->set_boundary (0, *boundary); - GridGenerator::hyper_cube_slit (*tria); - break; - default: - return false; - }; - - int refine_global = prm.get_integer ("Global refinement"); - if ((refine_global < 0) || (refine_global>10)) - return false; - else - tria->refine_global (refine_global); - - return true; -}; - - - - -template -void PoissonProblem::make_zoom_in_grid () { - GridGenerator::hyper_cube (*tria); - - // refine first cell - tria->begin_active()->set_refine_flag(); - tria->execute_coarsening_and_refinement (); - // refine first active cell - // on coarsest level - tria->begin_active()->set_refine_flag (); - tria->execute_coarsening_and_refinement (); - - Triangulation::active_cell_iterator cell; - for (int i=0; i<(dim==3 ? 5 : 17); ++i) - { - // refine the presently - // second last cell several - // times - cell = tria->last_active(tria->n_levels()-1); - --cell; - cell->set_refine_flag (); - tria->execute_coarsening_and_refinement (); - }; -}; - - - - -template -void PoissonProblem::make_random_grid () { - GridGenerator::hyper_cube (*tria); - tria->refine_global (1); - - Triangulation::active_cell_iterator cell, endc; - for (int i=0; i<(dim==3 ? 
7 : 12); ++i) - { - int n_levels = tria->n_levels(); - cell = tria->begin_active(); - endc = tria->end(); - - for (; cell!=endc; ++cell) - { - double r = rand()*1.0/RAND_MAX, - weight = 1.* - (cell->level()*cell->level()) / - (n_levels*n_levels); - - if (r <= 0.5*weight) - cell->set_refine_flag (); - }; - - tria->execute_coarsening_and_refinement (); - }; -}; - - - - -template -bool PoissonProblem::set_right_hand_side (ParameterHandler &prm) { - string rhs_name = prm.get ("Right hand side"); - - if (rhs_name == "zero") - rhs = new ZeroFunction(); - else - if (rhs_name == "constant") - rhs = new ConstantFunction(1.); - else - if (rhs_name == "trigpoly") - rhs = new RHSTrigPoly(); - else - if (rhs_name == "poly") - rhs = new RHSPoly (); - else - return false; - - if (rhs != 0) - return true; - else - return false; -}; - - - -template -bool PoissonProblem::set_boundary_values (ParameterHandler &prm) { - string bv_name = prm.get ("Boundary values"); - - if (bv_name == "zero") - boundary_values = new ZeroFunction (); - else - if (bv_name == "sine") - boundary_values = new BoundaryValuesSine (); - else - if (bv_name == "jump") - boundary_values = new BoundaryValuesJump (); - else - { - cout << "Unknown boundary value function " << bv_name << endl; - return false; - }; - - if (boundary_values != 0) - return true; - else - return false; -}; - - - - -template -void PoissonProblem::run (ParameterHandler &prm) { - cout << "Test case = " << prm.get ("Test run") - << endl; - - cout << " Making grid... "; - if (!make_grid (prm)) - return; - cout << tria->n_active_cells() << " active cells." << endl; - - if (!set_right_hand_side (prm)) - return; - - if (!set_boundary_values (prm)) - return; - - FEQ1 fe; - PoissonEquation equation (*rhs); - QGauss2 quadrature; - - cout << " Distributing dofs... "; - dof->distribute_dofs (fe); - cout << dof->n_dofs() << " degrees of freedom." << endl; - - cout << " Assembling matrices..." << endl; - ProblemBase::FunctionMap dirichlet_bc; - dirichlet_bc[0] = boundary_values; - assemble (equation, quadrature, - UpdateFlags(update_values | update_gradients | - update_JxW_values | update_q_points), - dirichlet_bc); - - cout << " Solving..." << endl; - solve (); - - cout << " Writing to file <" << prm.get("Output file") << ">..." 
- << endl; - - DataOut out; - string o_filename = prm.get ("Output file"); - ofstream gnuplot(o_filename.c_str()); - out.attach_dof_handler (*dof_handler); - out.add_data_vector (solution, "solution"); - out.build_patches (); - out.write_gnuplot (gnuplot); - gnuplot.close (); - - // release the lock of the DoF object to - // the FE object - dof->clear (); - - cout << endl; -}; - - - - - -template class PoissonProblem<2>; diff --git a/deal.II/deal.II/Attic/examples/poisson/results/.cvsignore b/deal.II/deal.II/Attic/examples/poisson/results/.cvsignore deleted file mode 100644 index acabd78095..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/results/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.gnuplot -*.eps diff --git a/deal.II/deal.II/Attic/examples/poisson/results/make_ps b/deal.II/deal.II/Attic/examples/poisson/results/make_ps deleted file mode 100644 index df05475d6b..0000000000 --- a/deal.II/deal.II/Attic/examples/poisson/results/make_ps +++ /dev/null @@ -1,38 +0,0 @@ -set data style lines -set noxtics -set noytics -set noztics -set nokey -set para -set hidden3d -set term postscript eps - -!echo " Making " -set output "zoom_in.eps" -splot "zoom_in.gnuplot" - -!echo " Making " -set output "ball.eps" -splot "ball.gnuplot" - -!echo " Making " -set output "curved_line.eps" -splot "curved_line.gnuplot" - -!echo " Making " -set output "random.eps" -splot "random.gnuplot" - -!echo " Making " -set output "jump.eps" -splot "jump.gnuplot" - -!echo " Making " -set view 52,115 -set output "L-region.eps" -splot "L-region.gnuplot" - -!echo " Making " -set view 52,115 -set output "slit_domain.eps" -splot "slit_domain.gnuplot" diff --git a/deal.II/deal.II/Attic/examples/step-by-step/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/Makefile deleted file mode 100644 index 6cf1272375..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# $Id$ -# -# This Makefile only recurses into the subdirs - - -# existing examples. take dirnames and strip 'step' -steps = $(shell echo step-*) - - -# default is: build all examples. for each example, there is a target -# build-step-N, where N in [1...] 
-default: $(addprefix build-,$(steps)) - -# run example programs; make a target run-step-N for each N -run: $(addprefix run-,$(steps)) - -# clean subdirs; make a target clean-step-N for each N -clean: $(addprefix clean-,$(steps)) - - - -# for each build/run/clean target: strip the build- prefix of the -# target and build in that directory -build-step-%: - cd $(@:build-%=%) ; $(MAKE) -run-step-%: - cd $(@:run-%=%) ; $(MAKE) run -clean-step-%: - cd $(@:clean-%=%) ; $(MAKE) clean - - -# all targets in this directory do not produce files, so they are -# .PHONY: -.PHONY: $(addprefix build-step-,$(steps)) \ - $(addprefix run-step-,$(steps)) \ - $(addprefix clean-step-,$(steps)) diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-1/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-1/.cvsignore deleted file mode 100644 index ca83da2788..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-1/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-1 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-1/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-1/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-1/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. 
Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. 
-%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-1/step-1.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-1/step-1.cc deleted file mode 100644 index f2487effbe..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-1/step-1.cc +++ /dev/null @@ -1,228 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 1999 */ - - // The most fundamental class in the - // library is the ``Triangulation'' - // class, which is declared here: -#include - // We need the following two includes - // for loops over cells and/or faces: -#include -#include - // Here are some functions to - // generate standard grids: -#include - // We would like to use boundaries - // which are not straight lines, so - // we import some classes which - // predefine some boundary - // descriptions: -#include - // Output of grids in various - // graphics formats: -#include - - // This is needed for C++ output: -#include - - - - // In the following function, we - // simply use the unit square as - // domain and produce a globally - // refined grid from it. -void first_grid () -{ - // Define an object for a - // triangulation of a - // two-dimensional domain. Here and - // in many following cases, the - // string "<2>" after a class name - // indicates that this is an object - // that shall work in two space - // dimensions. Likewise, there are - // version working in one ("<1>") - // and three ("<3>") space - // dimensions, or for all - // dimensions. We will see such - // constructs in later examples, - // where we show how to program - // dimension independently. - // (At present, only one through - // three space dimensions are - // supported, but that is not a - // restriction. In case someone - // would like to implement four - // dimensional finite elements, for - // example for general relativity, - // this would be a straightforward - // thing.) - Triangulation<2> triangulation; - - // Fill it with a square - GridGenerator::hyper_cube (triangulation); - - // Refine all cells four times, to - // yield 4^4=256 cells in total - triangulation.refine_global (4); - - // Now we want to write it to some - // output, here in postscript - // format - ofstream out ("grid-1.eps"); - GridOut grid_out; - grid_out.write_eps (triangulation, out); -}; - - - - // The grid in the following function - // is slightly more complicated in - // that we use a ring domain and - // refine the result once globally -void second_grid () -{ - // Define an object for a - // triangulation of a - // two-dimensional domain - Triangulation<2> triangulation; - - // Fill it with a ring domain. The - // center of the ring shall be the - // point (1,0), and inner and outer - // radius shall be 0.5 and 1. The - // number of circumferentical cells - // will be adjusted automatically - // by this function (in this case, - // there will be 10) - const Point<2> center (1,0); - const double inner_radius = 0.5, - outer_radius = 1.0; - GridGenerator::hyper_shell (triangulation, - center, inner_radius, outer_radius); - // By default, the triangulation - // assumes that all boundaries are - // straight and given by the cells - // of the coarse grid (which we - // just created). Here, however, we - // would like to have a curved - // boundary. 
Furtunately, some good - // soul implemented an object which - // describes the boundary of a ring - // domain; it only needs the center - // of the ring and automatically - // figures out the inner and outer - // radius when needed. Note that we - // associate this boundary object - // with that part of the boundary - // that has the "boundary number" - // zero. By default, all boundary - // parts have this number, but you - // might want to change this number - // for some parts, and then the - // curved boundary thus associated - // with number zero will not apply - // there. - const HyperShellBoundary<2> boundary_description(center); - triangulation.set_boundary (0, boundary_description); - - // Now, just for the purpose of - // demonstration and for no - // particular reason, we will - // refine the grid in five steps - // towards the inner circle of the - // domain: - for (unsigned int step=0; step<5; ++step) - { - // Get an iterator which points - // to a cell and which we will - // move over all active cells - // one by one. Active cells are - // those that are not further - // refined - Triangulation<2>::active_cell_iterator cell, endc; - cell = triangulation.begin_active(); - endc = triangulation.end(); - - // Now loop over all cells... - for (; cell!=endc; ++cell) - // ...and over all vertices - // of the cells. Note the - // dimension-independent way - // by which we find out about - // the number of faces of a - // cell - for (unsigned int vertex=0; - vertex < GeometryInfo<2>::vertices_per_cell; - ++vertex) - { - // If this cell is at the - // inner boundary, then - // at least one of its vertices - // must have a radial - // distance from the center - // of 0.5 - const Point<2> vector_to_center - = (cell->vertex(vertex) - center); - const double distance_from_center - = sqrt(vector_to_center.square()); - - if (fabs(distance_from_center - inner_radius) < 1e-10) - { - // Ok, this is one of - // the cells we were - // looking for. Flag - // it for refinement - // and go to the next - // cell by breaking - // the loop over all - // vertices - cell->set_refine_flag (); - break; - }; - }; - - // Refine the cells which we - // have marked - triangulation.execute_coarsening_and_refinement (); - }; - - - // Now we want to write it to some - // output, here in postscript - // format - ofstream out ("grid-2.eps"); - GridOut grid_out; - grid_out.write_eps (triangulation, out); - - - // At this point, all objects - // created in this function will be - // destroyed in reverse - // order. Unfortunately, we defined - // the boundary object after the - // triangulation, which still has a - // pointer to it and the library - // will produce an error if the - // boundary object is destroyed - // before the triangulation. We - // therefore have to release it, - // which can be done as - // follows. Note that this sets the - // boundary object used for part - // "0" of the boundary back to a - // default object, over which the - // triangulation has full control. - triangulation.set_boundary (0); -}; - - - - // Main function. Only call the two - // subfunctions, which produce the - // two grids. 
-int main () -{ - first_grid (); - second_grid (); -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-2/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-2/.cvsignore deleted file mode 100644 index 07b5e7b099..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-2/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-2 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-2/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-2/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-2/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. 
Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-2/step-2.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-2/step-2.cc deleted file mode 100644 index 181571a7bc..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-2/step-2.cc +++ /dev/null @@ -1,361 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 1999 */ - - // The following includes are just - // like for the previous program, so - // will not be commented further -#include -#include -#include -#include -#include - - // We need this include file for the - // association of degrees of freedom - // ("DoF"s) to vertices, lines, and - // cells. -#include - // The following include contains the - // description of the bilinear finite - // element, including the facts that - // it has one degree of freedom on - // each vertex of the triangulation, - // but none on faces and none in the - // interior of the cells. - // - // In fact, the file contains the - // description of several more finite - // elements as well, such as - // biquadratic, bicubic and biquartic - // elements, but not only for two - // space dimensions, but also for one - // and three dimensions. 
-#include - // In the following file, several - // tools for manipulating degrees of - // freedom can be found: -#include - // We will use a sparse matrix to - // visualize the pattern of nonzero - // entries resulting from the - // distribution of degrees of freedom - // on the grid. That class can be - // found here: -#include - // We will want to use a special - // algorithm to renumber degrees of - // freedom. It is declared here: -#include - - // This is needed for C++ output: -#include - - - - // This is the function that produced - // the circular grid in the previous - // example. The sole difference is - // that it returns the grid it - // produces via its argument. - // - // We won't comment on the internals - // of this function, since this has - // been done in the previous - // example. If you don't understand - // what is happening here, look - // there. -void make_grid (Triangulation<2> &triangulation) -{ - const Point<2> center (1,0); - const double inner_radius = 0.5, - outer_radius = 1.0; - GridGenerator::hyper_shell (triangulation, - center, inner_radius, outer_radius); - - // This is the single difference to - // the respetive function in the - // previous program: since we want - // to export the triangulation - // through this function's - // parameter, we need to make sure - // that the boundary object lives - // at least as long as the - // triangulation does. However, - // since the boundary object is a - // local variable, it would be - // deleted at the end of this - // function, which is too early; by - // declaring it 'static', we can - // assure that it lives until the - // end of the program. - static const HyperShellBoundary<2> boundary_description(center); - triangulation.set_boundary (0, boundary_description); - - for (unsigned int step=0; step<5; ++step) - { - Triangulation<2>::active_cell_iterator cell, endc; - cell = triangulation.begin_active(); - endc = triangulation.end(); - - for (; cell!=endc; ++cell) - for (unsigned int vertex=0; - vertex < GeometryInfo<2>::vertices_per_cell; - ++vertex) - { - const Point<2> vector_to_center - = (cell->vertex(vertex) - center); - const double distance_from_center - = sqrt(vector_to_center.square()); - - if (fabs(distance_from_center - inner_radius) < 1e-10) - { - cell->set_refine_flag (); - break; - }; - }; - - triangulation.execute_coarsening_and_refinement (); - }; -}; - - - // Up to now, we only have a grid, - // i.e. some geometrical (the - // position of the vertices and which - // vertices make up which cell) and - // some topological information - // (neighborhoods of cells). To use - // numerical algorithms, one needs - // some logic information in addition - // to that: we would like to - // associate degree of freedom - // numbers to each vertex (or line, - // or cell, in case we were using - // higher order elements) to later - // generate matrices and vectors - // which describe a finite element - // field on the triangulation. -void distribute_dofs (DoFHandler<2> &dof_handler) -{ - // In order to associate degrees of - // freedom with features of a - // triangulation (vertices, lines, - // quadrilaterals), we need an - // object which describes how many - // degrees of freedom are to be - // associated to each of these - // objects. For (bi-, tri-)linear - // finite elements, this is done - // using the FEQ1 class, which - // states that one degree of - // freedom is to be assigned to - // each vertex, while there are - // none on lines and inside the - // quadrilateral. 
We first need to - // create an object of this class - // and use it to distribute the - // degrees of freedom. Note that - // the DoFHandler object will store - // a reference to this object, so - // we need to make it static as - // well, in order to prevent its - // preemptive - // destruction. (However, the - // library would warn us about this - // and exit the program if that - // occured. You can check this, if - // you want, by removing the - // 'static' declaration.) - static const FEQ1<2> finite_element; - dof_handler.distribute_dofs (finite_element); - - // Now we have associated a number - // to each vertex, but how can we - // visualize this? Unfortunately, - // presently there is no way - // implemented to directly show the - // DoF number associated with each - // vertex. However, such - // information would hardly ever be - // truly important, since the - // numbering itself is more or less - // arbitrary. There are more - // important factors, of which we - // will visualize one in the - // following. - // - // Associated with each vertex of - // the triangulation is a shape - // function. Assume we want to - // solve something like Laplace's - // equation, then the different - // matrix entries will be the - // integrals over the gradient of - // each two such shape - // functions. Obviously, since the - // shape functions are not equal to - // zero only on the cells adjacent - // to the vertex they are - // associated to, matrix entries - // will be nonzero only of the - // supports of the shape functions - // associated to the column and row - // numbers intersect. This is only - // the case for adjacent shape - // functions, and therefore only - // for adjacent vertices. Now, - // since the vertices are numbered - // more or less randomly be the - // above function - // (distribute_dofs), the pattern - // of nonzero entries in the matrix - // will be somewhat ragged, and we - // will take a look at it now. - // - // First we have to create a - // structure which we use to store - // the places of nonzero - // elements. We have to give it the - // size of the matrix, which in our - // case will be square with that - // many rows and columns as there - // are degrees of freedom on the - // grid: - SparsityPattern sparsity_pattern (dof_handler.n_dofs(), - dof_handler.n_dofs()); - // We fill it with the places where - // nonzero elements will be located - // given the present numbering of - // degrees of freedom: - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - // Before further work can be done - // on the object, we have to allow - // for some internal - // reorganization: - sparsity_pattern.compress (); - - // Now write the results to a file - ofstream out ("sparsity_pattern.1"); - sparsity_pattern.print_gnuplot (out); - // The result is in GNUPLOT format, - // where in each line of the output - // file, the coordinates of one - // nonzero entry are listed. The - // output will be shown below. - // - // If you look at it, you will note - // that the sparsity pattern is - // symmetric, which is quite often - // so, unless you have a rather - // special equation you want to - // solve. You will also note that - // it has several distinct region, - // which stem from the fact that - // the numbering starts from the - // coarsest cells and moves on to - // the finer ones; since they are - // all distributed symmetrically - // around the origin, this shows up - // again in the sparsity pattern. 
-}; - - - - // In the sparsity pattern produced - // above, the nonzero entries - // extended quite far off from the - // diagonal. For some algorithms, - // this is unfavorable, and we will - // show a simple way how to improve - // this situation. - // - // Remember that for an entry (i,j) - // in the matrix to be nonzero, the - // supports of the shape functions i - // and j needed to intersect - // (otherwise in the integral, the - // integrand would be zero everywhere - // since either the one or the other - // shape function is zero at some - // point). However, the supports of - // shape functions intersected only - // of they were adjacent to each - // other, so in order to have the - // nonzero entries clustered around - // the diagonal (where i equals j), - // we would like to have adjacent - // shape functions to be numbered - // with indices (DoF numbers) that - // differ not too much. - // - // This can be accomplished by a - // simple front marching algorithm, - // where one starts at a given vertex - // and gives it the index zero. Then, - // its neighbors are numbered - // successively, making their indices - // close to the original one. Then, - // their neighbors, if not yet - // numbered, are numbered, and so - // on. One such algorithm is the one - // by Cuthill and McKee, which is a - // little more complicated, but works - // along the same lines. We will use - // it to renumber the degrees of - // freedom such that the resulting - // sparsity pattern is more localized - // around the diagonal. -void renumber_dofs (DoFHandler<2> &dof_handler) -{ - // Renumber the degrees of freedom... - DoFRenumbering::Cuthill_McKee (dof_handler); - // ...regenerate the sparsity pattern... - SparsityPattern sparsity_pattern (dof_handler.n_dofs(), - dof_handler.n_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress (); - // ...and output the result: - ofstream out ("sparsity_pattern.2"); - sparsity_pattern.print_gnuplot (out); - // Again, the output is shown - // below. Note that the nonzero - // entries are clustered far better - // around the diagonal than - // before. This effect is even more - // distinguished for larger - // matrices (the present one has - // 1260 rows and columns, but large - // matrices often have several - // 100,000s). -}; - - - - - // This is the main program, which - // only calls the other functions in - // their respective order. -int main () -{ - // Allocate space for a triangulation... - Triangulation<2> triangulation; - // ...and create it - make_grid (triangulation); - - // A variable that will hold the - // information which vertex has - // which number. The geometric - // information is passed as - // parameter and a pointer to the - // triangulation will be stored - // inside the DoFHandler object. - DoFHandler<2> dof_handler (triangulation); - // Associate vertices and degrees - // of freedom. - distribute_dofs (dof_handler); - - // Show the effect of renumbering - // of degrees of freedom to the - // sparsity pattern of the matrix. 
- renumber_dofs (dof_handler); -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-3/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-3/.cvsignore deleted file mode 100644 index 03e276f8de..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-3/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-3 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-3/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-3/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-3/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. 
Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-3/step-3.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-3/step-3.cc deleted file mode 100644 index 990d8e46a4..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-3/step-3.cc +++ /dev/null @@ -1,829 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 1999 */ - - // These include files are already - // known to you. They declare the - // classes which handle - // triangulations and enumerate the - // degrees of freedom. -#include -#include - // And this is the file in which the - // functions are declared which - // create grids. -#include - - // The next three files contain - // classes which are needed for loops - // over all cells and to get the - // information from the cell objects. -#include -#include -#include - - // In this file are the finite - // element descriptions. -#include - - // And this file is needed for the - // creation of sparsity patterns of - // sparse matrices, as shown in - // previous examples: -#include - - // The next two file are needed for - // assembling the matrix using - // quadrature on each cell. The - // classes declared in them will be - // explained below. 
-#include -#include - - // The following three include files - // we need for the treatment of - // boundary values: -#include -#include -#include - - // These include files are for the - // linear algebra which we employ to - // solve the system of equations - // arising from the finite element - // discretization of the Laplace - // equation. We will use vectors and - // full matrices for assembling the - // system of equations locally on - // each cell, and transfer the - // results into a sparse matrix. We - // will then use a Conjugate Gradient - // solver to solve the problem, for - // which we need a preconditioner (in - // this program, we use the identity - // preconditioner which does nothing, - // but we need to include the file - // anyway), and a class which - // provides the solver with some - // memory for temporary vectors. -#include -#include -#include -#include -#include -#include - - // Finally, this is for output to a - // file. -#include -#include - - - // Instead of the procedural - // programming of previous examples, - // we encapsulate everything into a - // class for this program. The class - // consists of functions which do - // certain aspects of a finite - // element program, a `main' function - // which controls what is done first - // and what is done next, and a list - // of member variables. -class LaplaceProblem -{ - public: - // This is the constructor: - LaplaceProblem (); - - // And the top-level function, - // which is called from the - // outside to start the whole - // program (see the `main' - // function at the bottom of this - // file): - void run (); - - // Then there are some member - // functions that mostly do what - // their names suggest. Since - // they do not need to be called - // from outside, they are made - // private to this class. - private: - void make_grid_and_dofs (); - void assemble_system (); - void solve (); - void output_results () const; - - // And then we have the member - // variables. There are variables - // describing the triangulation - // and the numbering of the - // degrees of freedom... - Triangulation<2> triangulation; - FEQ1<2> fe; - DoFHandler<2> dof_handler; - - // ...variables for the sparsity - // pattern and values of the - // system matrix resulting from - // the discretization of the - // Laplace equation... - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - // ...and variables which will - // hold the right hand side and - // solution vectors. - Vector solution; - Vector system_rhs; -}; - - - // Here comes the constructor. It - // does not much more than associate - // the dof_handler variable to the - // triangulation we use. All the - // other member variables of the - // LaplaceProblem class have a - // default constructor which does all - // we want. -LaplaceProblem::LaplaceProblem () : - dof_handler (triangulation) -{}; - - - // Now, the first thing we've got to - // do is to generate the - // triangulation on which we would - // like to do our computation and - // number each vertex with a degree - // of freedom. We have seen this in - // the previous examples before. Then - // we have to set up space for the - // system matrix and right hand side - // of the discretized problem. This - // is what this function does: -void LaplaceProblem::make_grid_and_dofs () -{ - // First create the grid and refine - // all cells five times. 
Since the - // initial grid (which is the - // square [-1,1]x[-1,1]) consists - // of only one cell, the final grid - // has 32 times 32 cells, for a - // total of 1024. - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (5); - // Unsure that 1024 is the correct - // number? Let's see: - // n_active_cells return the number - // of terminal cells. By terminal - // we mean the cells on the finest - // grid. - cout << "Number of active cells: " - << triangulation.n_active_cells() - << endl; - // We stress the adjective - // `terminal' or `active', since - // there are more cells, namely the - // parent cells of the finest - // cells, their parents, etc, up to - // the one cell which made up the - // initial grid. Of course, on the - // next coarser level, the number - // of cells is one quarter that of - // the cells on the finest level, - // i.e. 256, then 64, 16, 4, and - // 1. We can get the total number - // of cells like this: - cout << "Total number of cells: " - << triangulation.n_cells() - << endl; - // Note the distinction between - // n_active_cells() and n_cells(). - - // Next we enumerate all the - // degrees of freedom. This is done - // by using the distribute_dofs - // function, as we have seen in - // previous examples. Since we use - // the FEQ1 class, i.e. bilinear - // elements, this associates one - // degree of freedom with each - // vertex. - dof_handler.distribute_dofs (fe); - - // Now that we have the degrees of - // freedom, we can take a look at - // how many there are: - cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << endl; - // There should be one DoF for each - // vertex. Since we have a 32 times - // 32 grid, the number of DoFs - // should be 33 times 33, or 1089. - - // As we have seen in the previous - // example, we set up a sparse - // matrix for the system matrix and - // tag those entries that might be - // nonzero. Since that has already - // been done, we won't discuss the - // next few lines: - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - // Now the sparsity pattern is - // built and fixed (after - // `compress' has been called, you - // can't add nonzero entries - // anymore; the sparsity pattern is - // `sealed', so to say), and we can - // initialize the matrix itself - // with it. Note that the - // SparsityPattern object does - // not hold the values of the - // matrix, it only stores the - // places where entries are. The - // entries are themselves stored in - // objects of type SparseMatrix, of - // which our variable system_matrix - // is one. - // - // The distinction between sparsity - // pattern and matrix was made to - // allow several matrices to use - // the same sparsity pattern. This - // may not seem relevant, but when - // you consider the size which - // matrices can have, and that it - // may take some time to build the - // sparsity pattern, this becomes - // important in large-scale - // problems. - system_matrix.reinit (sparsity_pattern); - - // The last thing to do in this - // function is to set the sizes of - // the right hand side vector and - // the solution vector to the right - // values: - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -}; - - - // Now comes the difficult part: - // assembling matrices and - // vectors. 
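Before turning to the assembly, the cell and DoF counts quoted above can be checked with pencil-and-paper arithmetic: n global refinements of a single square cell give 4^n active cells, (4^(n+1)-1)/3 cells in total when all coarser parents are counted, and (2^n+1)^2 vertices, hence Q1 degrees of freedom. A small stand-alone check, independent of the library:

#include <iostream>

int main ()
{
  const unsigned int n_refinements = 5;

  unsigned int active_cells = 1, total_cells = 1, cells_per_line = 1;
  for (unsigned int step = 0; step < n_refinements; ++step)
    {
      active_cells *= 4;          // each cell is split into four children
      total_cells  += active_cells;
      cells_per_line *= 2;
    }
  const unsigned int n_dofs = (cells_per_line + 1) * (cells_per_line + 1);

  std::cout << "Active cells: " << active_cells   // 1024
            << ", total cells: " << total_cells   // 1365
            << ", Q1 DoFs: " << n_dofs            // 1089
            << std::endl;
}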
In fact, this is not - // overly difficult, but it is - // something that the library can't - // do for you as for most of the - // other things in the functions - // above and below. - // - // The general way to assemble - // matrices and vectors is to loop - // over all cells, and on each cell - // compute the contribution of that - // cell to the global matrix and - // right hand side by quadrature. The - // idea now is that since we only - // need the finite element shape - // functions on the quadrature points - // of each cell, we don't need the - // shape functions of the finite - // element themselves any - // more. Therefore, we won't deal - // with the finite element object - // `fe' (which was of type FEQ1), but - // with another object which only - // provides us with the values, - // gradients, etc of the shape - // functions at the quadrature - // points. The objects which do this - // are of type FEValues. -void LaplaceProblem::assemble_system () -{ - // Ok, let's start: we need a - // quadrature formula for the - // evaluation of the integrals on - // each cell. Let's take a Gauss - // formula with two quadrature - // points in each direction, i.e. a - // total of four points since we - // are in 2D. This quadrature - // formula integrates polynomials - // of degrees up to three exactly - // (in 1D). Since the integrands in - // the matrix entries are quadratic - // (in 1D), this is sufficient. The - // same holds for 2D. - QGauss2<2> quadrature_formula; - // And we initialize the object - // which we have briefly talked - // about above. It needs to be told - // which the finite element is that - // we want to use, the quadrature - // points and their - // weights. Finally, we have to - // tell it what we want it to - // compute on each cell: we need - // the values of the shape - // functions at the quadrature - // points, their gradients, and - // also the weights of the - // quadrature points and the - // determinants of the Jacobian - // transformations from the unit - // cell to the real cells. The - // values of the shape functions - // computed by specifying - // update_values; the gradients are - // done alike, using - // update_gradients. The - // determinants of the Jacobians - // and the weights are always used - // together, so only the products - // (Jacobians times weights, or - // short JxW) are computed; since - // we also need them, we have to - // list them as well: - FEValues<2> fe_values (fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_JxW_values)); - - // For use further down below, we - // define two short cuts for the - // number of degrees of freedom on - // each cell (since we are in 2D - // and degrees of freedom are - // associated with vertices only, - // this number is four). We also - // define an abbreviation for the - // number of quadrature points - // (here that should be nine). In - // general, it is a good idea to - // use their symbolic names instead - // of hard-coding these number even - // if you know them, since you may - // want to change the quadrature - // formula and/or finite element at - // some time; the program will just - // work with these changes, without - // the need to change the matrix - // assemblage. - // - // The shortcuts, finally, are only - // defined to make the following - // loops a bit more readable. You - // will see them in many places in - // larger programs, and - // `dofs_per_cell' and `n_q_points' - // are more or less standard names - // for these purposes. 
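The exactness claim for the two-point Gauss formula is easy to verify numerically. The stand-alone check below uses the textbook Gauss-Legendre points 1/2 +- 1/(2*sqrt(3)) and weights 1/2 on the unit interval; that these are also the points QGauss2 uses per direction is an assumption, but any two-point Gauss rule has this form.

#include <cmath>
#include <iostream>

int main ()
{
  // Two-point Gauss-Legendre rule on [0,1].
  const double points[2]  = { 0.5 - 0.5 / std::sqrt(3.0),
                              0.5 + 0.5 / std::sqrt(3.0) };
  const double weights[2] = { 0.5, 0.5 };

  // Integrate x^3 over [0,1]; the exact value is 1/4.
  double integral = 0;
  for (unsigned int q = 0; q < 2; ++q)
    integral += weights[q] * std::pow (points[q], 3);

  std::cout << "quadrature: " << integral
            << "  exact: " << 0.25 << std::endl;   // both print 0.25
}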
- const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - - // Now, we said that we wanted to - // assemble the global matrix and - // vector cell-by-cell. We could - // write the results directly into - // the global matrix, but this is - // not very efficient since access - // to the elements of a sparse - // matrix is slow. Rather, we first - // compute the contribution of each - // ell in a small matrix with the - // degrees of freedom on the - // present cell, and only transfer - // them to the global matrix when - // the copmutations are finished - // for this cell. We do the same - // for the right hand side vector, - // although access times are not so - // problematic for them. - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - // When assembling the - // contributions of each cell, we - // do this with the local numbering - // of the degrees of freedom - // (i.e. the number running from - // zero through - // dofs_per_cell-1). However, when - // we transfer the result into the - // global matrix, we have to know - // the global numbers of the - // degrees of freedom. When we get - // them, we need a scratch array - // for these numbers: - vector local_dof_indices (dofs_per_cell); - - // Now for th loop over all - // cells. You have seen before how - // this works, so this should be - // familiar to you: - DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - // We are on one cell, and we - // would like the values and - // gradients of the shape - // functions be computed, as - // well as the determinants of - // the Jacobian matrices of the - // mapping between unit cell - // and true cell, at the - // quadrature points. Since all - // these values depend on the - // geometry of the cell, we - // have to have the FEValues - // object re-compute them on - // each cell: - fe_values.reinit (cell); - - // Reset the values of the - // contributions of this cell - // to global matrix and global - // right hand side to zero, - // before we fill them. - cell_matrix.clear (); - cell_rhs.clear (); - - // Assemble the matrix: For the - // Laplace problem, the matrix - // on each cell is the integral - // over the gradients of shape - // function i and j. Since we - // do not integrate, but rather - // use quadrature, this is the - // sum over all quadrature - // points of the integrands - // times the determinant of the - // Jacobian matrix at the - // quadrature point times the - // weight of this quadrature - // point. You can get the - // gradient of shape function i - // at quadrature point q_point - // by using - // fe_values.shape_grad(i,q_point); - // this gradient is a - // 2-dimensional vector (in - // fact it is of type - // Tensor<1,dim>, with here - // dim=2) and the product of - // two such vectors is the - // scalar product, i.e. the - // product of the two - // shape_grad function calls is - // the dot product. - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - - // Then again loop over all - // shape functions i and j and - // transfer the local elements - // to the global matrix. 
The - // global numbers can be - // obtained using - // local_dof_indices[i]: - for (unsigned int i=0; i boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction<2>(), - boundary_values); - // Now that we got the list of - // boundary DoFs and their - // respective boundary values, - // let's use them to modify the - // system of equations - // accordingly. This is done by the - // following function call: - MatrixTools<2>::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -}; - - - // The following function simply - // solves the discretized - // equation. As the system is quite a - // large one for direct solvers such - // as Gauss elimination or LU - // decomposition, we use a Conjugate - // Gradient algorithm. You should - // remember that the number of - // variables here (only 1089) is a - // very small number for finite - // element computations, where - // 100.000 is a more usual number; - // for this number of variables, - // direct methods are no longer - // usable and you are forced to use - // methods like CG. -void LaplaceProblem::solve () -{ - // We need to tell the algorithm - // where to stop. This is done by - // using a SolverControl object, - // and as stopping criterion we - // say: maximally 1000 iterations - // (which is far more than is - // needed for 1089 variables; see - // the results section to find out - // how many were really used), and - // stop if the norm of the residual - // is below 1e-12. In practice, the - // latter criterion will be the one - // which stops the iteration. - SolverControl solver_control (1000, 1e-12); - // Furthermore, the CG algorithm - // needs some space for temporary - // vectors. Rather than allocating - // it on the stack or heap itself, - // it relies on helper objects, - // which can sometimes do a better - // job at this. The - // PrimitiveVectorMemory class is - // such a helper class which the - // solver can ask for memory. The - // angle brackets indicate that - // this class really takes a - // template parameter (here the - // data type of the vectors we - // use), which however has a - // default value, which is - // appropriate here. - PrimitiveVectorMemory<> vector_memory; - // Then we need the solver - // itself. The template parameters - // here are the matrix type and the - // type of the vectors. They - // default to the ones we use here. - SolverCG<> cg (solver_control, vector_memory); - - // Now solve the system of - // equations. The CG solver takes a - // preconditioner, but we don't - // want to use one, so we tell it - // to use the identity operation as - // preconditioner. - cg.solve (system_matrix, solution, system_rhs, - PreconditionIdentity()); - // Now that the solver has done its - // job, the solution variable - // contains the nodal values of the - // solution function. -}; - - - // The last part of a typical finite - // element program is to output the - // results and maybe do some - // postprocessing (for example - // compute the maximal stress values - // at the boundary, or the average - // flux across the outflow, etc). We - // have no such postprocessing here, - // but we would like to write the - // solution to a file. -void LaplaceProblem::output_results () const -{ - // To write the output to a file, - // we need an object which knows - // about output formats and the - // like. 
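Looking back at assemble_system() as a whole, the cell loop that the comments above walk through can be summarized as follows. This is a reconstruction consistent with those comments and the calls they name (shape_grad, shape_value, JxW, get_dof_indices), not a verbatim copy of the deleted file; the constant right hand side value of 1 is the choice this example program makes.

// Sketch of the per-cell assembly described above.
DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active(),
                                    endc = dof_handler.end();
for (; cell != endc; ++cell)
  {
    fe_values.reinit (cell);
    cell_matrix.clear ();
    cell_rhs.clear ();

    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        for (unsigned int q = 0; q < n_q_points; ++q)
          // grad phi_i . grad phi_j, times the Jacobian-times-weight factor:
          cell_matrix(i,j) += fe_values.shape_grad (i, q) *
                              fe_values.shape_grad (j, q) *
                              fe_values.JxW (q);

    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      for (unsigned int q = 0; q < n_q_points; ++q)
        // right hand side, taken to be the constant 1 in this program:
        cell_rhs(i) += fe_values.shape_value (i, q) *
                       1.0 *
                       fe_values.JxW (q);

    // Map local indices 0..dofs_per_cell-1 to global DoF numbers and
    // scatter the local contributions into the global objects:
    cell->get_dof_indices (local_dof_indices);
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      {
        for (unsigned int j = 0; j < dofs_per_cell; ++j)
          system_matrix.add (local_dof_indices[i],
                             local_dof_indices[j],
                             cell_matrix(i,j));
        system_rhs(local_dof_indices[i]) += cell_rhs(i);
      }
  }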
This is the DataOut class, - // and we need an object of that - // type: - DataOut<2> data_out; - // Now we have to tell it where to - // take the values from which it - // shall write. We tell it which - // DoFHandler object to use, and we - // add the solution vector (and the - // name by which it shall be - // written to disk) to the list of - // data that is to be written. If - // we had more than one vector - // which we would like to look at - // in the output (for example right - // hand sides, errors per cell, - // etc) we would add them as well: - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - // After the DataOut object knows - // which data it is to work on, we - // have to tell it to process them - // into something the backends can - // handle. The reason is that we - // have separated the frontend - // (which knows about how to treat - // DoFHandler objects and data - // vectors) from the backend (which - // knows several output formats) - // and use an intermediate data - // format to transfer data from the - // front- to the backend. The data - // is transformed into this - // intermediate format by the - // following function: - data_out.build_patches (); - - // Now we have everything in place - // for the actual output. Just open - // a file and write the data into - // it, using GNUPLOT format (there - // are other functions which write - // their data in postscript, AVS, - // GMV, or some other format): - ofstream output ("solution.gpl"); - data_out.write_gnuplot (output); -}; - - - // The following function is the main - // function which calls all the other - // functions of the LaplaceProblem - // class. The order in which this is - // done resembles the order in which - // most finite element programs - // work. Since the names are mostly - // self-explanatory, there is not - // much to comment about: -void LaplaceProblem::run () -{ - make_grid_and_dofs (); - assemble_system (); - solve (); - output_results (); -}; - - - - // This is the main function of the - // program. Since the concept of a - // main function is mostly a remnant - // from the pre-object era in C/C++ - // programming, it often does not - // much more than creating an object - // of the top-level class and calling - // it principle function. This is - // what is done here as well. -int main () -{ - LaplaceProblem laplace_problem; - laplace_problem.run (); - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-4/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-4/.cvsignore deleted file mode 100644 index 617a14613a..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-4/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-4 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-4/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-4/Makefile deleted file mode 100644 index bd4dad5fbc..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-4/Makefile +++ /dev/null @@ -1,169 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. 
The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-deal2-3d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-deal2-3d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. 
You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-4/step-4.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-4/step-4.cc deleted file mode 100644 index bac3800554..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-4/step-4.cc +++ /dev/null @@ -1,607 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 1999 */ - - // The first few (many?) include - // files have already been used in - // the previous example, so we will - // not explain their meaning here - // again. -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - - // This is new, however: in the - // previous example we got some - // unwanted output from the linear - // solvers. If we want to suppress - // it, we have to include this file - // and add a line somewhere to the - // program; in this program, it was - // added to the main function. -#include - - - - // This is again the same - // LaplaceProblem class as in the - // previous example. The only - // difference is that we have now - // declared it as a class with a - // template parameter, and the - // template parameter is of course - // the spatial dimension in which we - // would like to solve the Laplace - // equation. Of course, several of - // the member variables depend on - // this dimension as well, in - // particular the Triangulation - // class, which has to represent - // quadrilaterals or hexahedra, - // respectively. Apart from this, - // everything is as before. -template -class LaplaceProblem -{ - public: - LaplaceProblem (); - void run (); - - private: - void make_grid_and_dofs (); - void assemble_system (); - void solve (); - void output_results () const; - - Triangulation triangulation; - FEQ1 fe; - DoFHandler dof_handler; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - Vector solution; - Vector system_rhs; -}; - - - // In the following, we declare two - // more classes, which will represent - // the functions of the - // dim-dimensional space denoting the - // right hand side and the - // non-homogeneous Dirichlet boundary - // values. - // - // Each of these classes is derived - // from a common, abstract base class - // Function, which declares the - // common interface which all - // functions have to follow. 
In - // particular, concrete classes have - // to overload the `value' function, - // which takes a point in - // dim-dimensional space as - // parameters and shall return the - // value at that point as a `double' - // variable. - // - // The `value' function takes a - // second argument, which we have - // here named `component': This is - // only meant for vector valued - // functions, where you may want to - // access a certain component of the - // vector at the point `p'. However, - // our functions are scalar, so we - // need not worry about this - // parameter and we will not use it - // in the implementation of the - // functions. Note that in the base - // class (Function), the declaration - // of the `value' function has a - // default value of zero for the - // component, so we will access the - // `value' function of the right hand - // side with only one parameter, - // namely the point where we want to - // evaluate the function. -template -class RightHandSide : public Function -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - -template -class BoundaryValues : public Function -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - - - // We wanted the right hand side - // function to be 4*(x**4+y**4) in - // 2D, or 4*(x**4+y**4+z**4) in - // 3D. Unfortunately, this is not as - // elegantly feasible dimension - // independently as much of the rest - // of this program, so we have to do - // it using a small - // loop. Fortunately, the compiler - // knows the size of the loop at - // compile time, i.e. the number of - // times the body will be executed, - // so it can optimize away the - // overhead needed for the loop and - // the result will be as fast as if - // we had used the formulas above - // right away. - // - // Note that the different - // coordinates (i.e. `x', `y', ...) - // of the point are accessed using - // the () operator. -template -double RightHandSide::value (const Point &p, - const unsigned int) const -{ - double return_value = 0; - for (unsigned int i=0; i -double BoundaryValues::value (const Point &p, - const unsigned int) const -{ - return p.square(); -}; - - - - - // This is the constructor of the - // LaplaceProblem class. It - // associates the DoFHandler to the - // triangulation just as in the - // previous example. -template -LaplaceProblem::LaplaceProblem () : - dof_handler (triangulation) -{}; - - - - // Grid creation is something - // inherently dimension - // dependent. However, as long as the - // domains are sufficiently similar - // in 2D or 3D, the library can - // abstract for you. In our case, we - // would like to again solve on the - // square [-1,1]x[-1,1] in 2D, or on - // the cube [-1,1]x[-1,1]x[-1,1] in - // 3D; both can be termed - // ``hyper_cube'', so we may use the - // same function in whatever - // dimension we are. Of course, the - // functions that create a hypercube - // in two and three dimensions are - // very much different, but that is - // something you need not care - // about. Let the library handle the - // difficult things. - // - // Likewise, associating a degree of - // freedom with each vertex is - // something which certainly looks - // different in 2D and 3D, but that - // does not need to bother you. This - // function therefore looks exactly - // like in the previous example, - // although it performs actions that - // in their details are quite - // different. 
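Written out, the two value() overloads described further up in this file amount to only a few lines each. The bodies below are a sketch consistent with the stated formulas, 4*(x^4+y^4) respectively 4*(x^4+y^4+z^4) for the right hand side and the squared distance from the origin, i.e. p.square(), for the boundary values; std::pow requires <cmath>.

// Sketch of the two value() overloads described above; the loop over the
// dim coordinates matches the formula 4*(x^4 + y^4 [+ z^4]).
template <int dim>
double RightHandSide<dim>::value (const Point<dim> &p,
                                  const unsigned int /*component*/) const
{
  double return_value = 0;
  for (unsigned int i = 0; i < dim; ++i)
    return_value += 4 * std::pow (p(i), 4);
  return return_value;
}

template <int dim>
double BoundaryValues<dim>::value (const Point<dim> &p,
                                   const unsigned int /*component*/) const
{
  // p.square() is the sum of the squared coordinates of the point.
  return p.square ();
}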
The only significant - // difference is the number of cells - // resulting, which is much higher in - // three than in two space - // dimensions! -template -void LaplaceProblem::make_grid_and_dofs () -{ - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (4); - - cout << " Number of active cells: " - << triangulation.n_active_cells() - << endl - << " Total number of cells: " - << triangulation.n_cells() - << endl; - - dof_handler.distribute_dofs (fe); - - cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << endl; - - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -}; - - - - // Unlike in the previous example, we - // would now like to use a - // non-constant right hand side - // function and non-zero boundary - // values. Both are tasks that are - // readily achieved with a only a few - // new lines of code in the - // assemblage of the matrix and right - // hand side. - // - // More interesting, though, is they - // way we assemble matrix and right - // hand side vector dimension - // independently: there is simply no - // difference to the pure - // two-dimensional case. Since the - // important objects used in this - // function (quadrature formula, - // FEValues) depend on the dimension - // by way of a template parameter as - // well, they can take care of - // setting up properly everything for - // the dimension for which this - // function is compiled. By declaring - // all classes which might depend on - // the dimension using a template - // parameter, the library can make - // nearly all work for you and you - // don't have to care about most - // things. -template -void LaplaceProblem::assemble_system () -{ - QGauss2 quadrature_formula; - - // We wanted to have a non-constant - // right hand side, so we use an - // object of the class declared - // above to generate the necessary - // data. Since this right hand side - // object is only used in this - // function, we only declare it - // here, rather than as a member - // variable of the LaplaceProblem - // class, or somewhere else. - const RightHandSide right_hand_side; - - // Compared to the previous - // example, in order to evaluate - // the non-constant right hand side - // function we now also need the - // quadrature points on the cell we - // are presently on (previously, - // they were only needed on the - // unit cell, in order to compute - // the values and gradients of the - // shape function, which are - // defined on the unit cell - // however). We can tell the - // FEValues object to do for us by - // giving it the update_q_points - // flag: - FEValues fe_values (fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_q_points | - update_JxW_values)); - - // Note that the following numbers - // depend on the dimension which we - // are presently using. 
However, - // the FE and Quadrature classes do - // all the necessary work for you - // and you don't have to care about - // the dimension dependent parts: - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - vector local_dof_indices (dofs_per_cell); - - // Note here, that a cell is a - // quadrilateral in two space - // dimensions, but a hexahedron in - // 3D. In fact, the - // active_cell_iterator data type - // is something different, - // depending on the dimension we - // are in, but to the outside world - // they look alike and you will - // probably never see a difference - // although they are totally - // unrelated. - DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - cell_matrix.clear (); - cell_rhs.clear (); - - // Now we have to assemble the - // local matrix and right hand - // side. This is done exactly - // like in the previous - // example, but now we revert - // the order of the loops - // (which we can safely do - // since they are independent - // of each other) and merge the - // loops for the local matrix - // and the local vector as far - // as possible; this makes - // things a bit faster. - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; i boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - BoundaryValues(), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -}; - - - // Solving the linear system of - // equation is something that looks - // almost identical in most - // programs. In particular, it is - // dimension independent, so this - // function is mostly copied from the - // previous example. -template -void LaplaceProblem::solve () -{ - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - cg.solve (system_matrix, solution, system_rhs, - PreconditionIdentity()); - - // We have made one addition, - // though: since we suppress output - // from the linear solvers, we have - // to print the number of - // iterations by hand. - cout << " " << solver_control.last_step() - << " CG iterations needed to obtain convergence." - << endl; -}; - - - - // This function also does what the - // respective one did in the previous - // example. No changes here for - // dimension independentce either. -template -void LaplaceProblem::output_results () const -{ - DataOut data_out; - - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - - data_out.build_patches (); - - // Only difference to the previous - // example: write output in GMV - // format, rather than for - // gnuplot. We use the dimension in - // the filename to generate - // distinct filenames for each run - // (in a better program, one would - // check whether `dim' can have - // other values than 2 or 3, but we - // neglect this here for the sake - // of brevity). - ofstream output (dim == 2 ? - "solution-2d.gmv" : - "solution-3d.gmv"); - data_out.write_gmv (output); -}; - - - - // This is the function which has the - // top-level control over - // everything. Apart from one line of - // additional output, it is the same - // as for the previous example. 
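The merged loop that this assemble_system() describes, with the quadrature points outermost and the non-constant right hand side evaluated at the mapped quadrature points, looks roughly like the following; again a reconstruction consistent with the comments, not a verbatim copy of the deleted file.

// Sketch of the merged assembly loop: matrix and right hand side are
// filled together, and the right hand side is evaluated at the real
// (mapped) quadrature point, which is why update_q_points was requested.
for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    {
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        cell_matrix(i,j) += fe_values.shape_grad (i, q_point) *
                            fe_values.shape_grad (j, q_point) *
                            fe_values.JxW (q_point);

      cell_rhs(i) += fe_values.shape_value (i, q_point) *
                     right_hand_side.value (fe_values.quadrature_point (q_point)) *
                     fe_values.JxW (q_point);
    }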
-template -void LaplaceProblem::run () -{ - cout << "Solving problem in " << dim << " space dimensions." << endl; - - make_grid_and_dofs(); - assemble_system (); - solve (); - output_results (); -}; - - - - // And this is the main function. It - // also looks mostly like in the - // previous example: -int main () -{ - // In the previous example, we had - // the output from the linear - // solvers about the starting - // residual and the number of the - // iteration where convergence was - // detected. This can be suppressed - // like this: - deallog.depth_console (0); - // The rationale here is the - // following: the deallog - // (i.e. deal-log, not de-allog) - // variable represents a stream to - // which some parts of the library - // write output. It redirects this - // output to the console and if - // required to a file. The output - // is nested in a way that each - // function can use a prefix string - // (separated by colons) for each - // line of output; if it calls - // another function, that may also - // use its prefix which is then - // printed after the one of the - // calling function. Since output - // from functions which are nested - // deep below is usually not as - // important as top-level output, - // you can give the deallog - // variable a maximal depth of - // nested output for output to - // console and file. The depth zero - // which we gave here means that no - // output is written. - - // After having done this - // administrative stuff, we can go - // on just as before: define one of - // these top-level objects and - // transfer control to - // it. Actually, now is the point - // where we have to tell the - // compiler which dimension we - // would like to use; all functions - // up to now including the classes - // were only templates and nothing - // has been compiled by now, but by - // declaring the following objects, - // the compiler will start to - // compile all the functions at the - // top using the template parameter - // replaced with a concrete value. - // - // For demonstration, we will first - // let the whole thing run in 2D - // and then in 3D: - LaplaceProblem<2> laplace_problem_2d; - laplace_problem_2d.run (); - - LaplaceProblem<3> laplace_problem_3d; - laplace_problem_3d.run (); - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-5/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-5/.cvsignore deleted file mode 100644 index 22ee4e3e3e..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-5/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-5 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-5/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-5/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-5/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. 
The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. 
You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-5/circle-grid.inp b/deal.II/deal.II/Attic/examples/step-by-step/step-5/circle-grid.inp deleted file mode 100644 index f28a7a238a..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-5/circle-grid.inp +++ /dev/null @@ -1,46 +0,0 @@ -25 20 0 0 0 -1 -0.7071 -0.7071 0 -2 0.7071 -0.7071 0 -3 -0.2668 -0.2668 0 -4 0.2668 -0.2668 0 -5 -0.2668 0.2668 0 -6 0.2668 0.2668 0 -7 -0.7071 0.7071 0 -8 0.7071 0.7071 0 -9 0 -1 0 -10 0.5 -0.5 0 -11 0 -0.3139 0 -12 -0.5 -0.5 0 -13 0 -0.6621 0 -14 -0.3139 0 0 -15 -0.5 0.5 0 -16 -1 0 0 -17 -0.6621 0 0 -18 0.3139 0 0 -19 0 0.3139 0 -20 0 0 0 -21 1 0 0 -22 0.5 0.5 0 -23 0.6621 0 0 -24 0 1 0 -25 0 0.6621 0 -1 0 quad 1 9 13 12 -2 0 quad 9 2 10 13 -3 0 quad 13 10 4 11 -4 0 quad 12 13 11 3 -5 0 quad 1 12 17 16 -6 0 quad 12 3 14 17 -7 0 quad 17 14 5 15 -8 0 quad 16 17 15 7 -9 0 quad 3 11 20 14 -10 0 quad 11 4 18 20 -11 0 quad 20 18 6 19 -12 0 quad 14 20 19 5 -13 0 quad 2 21 23 10 -14 0 quad 21 8 22 23 -15 0 quad 23 22 6 18 -16 0 quad 10 23 18 4 -17 0 quad 7 15 25 24 -18 0 quad 15 5 19 25 -19 0 quad 25 19 6 22 -20 0 quad 24 25 22 8 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-5/step-5.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-5/step-5.cc deleted file mode 100644 index a131374b86..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-5/step-5.cc +++ /dev/null @@ -1,940 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 1999 */ - - // Again, the first few include files - // are already known, so we won't - // comment on them: -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - // This one is new. We want to read a - // triangulation from disk, and the - // class which does this is declared - // in the following file: -#include - - // We will use a circular domain, and - // the object describing the boundary - // of it comes from this file: -#include - - // This is C++ ... -#include - // ... and this is too. We will - // convert integers to strings using - // the classes inside this file: -#include - - - - // The main class is mostly as in the - // previous example. 
The most visible - // change is that the function - // ``make_grid_and_dofs'' has been - // removed, since making of the grid - // is now done in the ``run'' - // function and the rest of its - // functionality now is in - // ``setup_system''. Apart from this, - // everything is as before. -template -class LaplaceProblem -{ - public: - LaplaceProblem (); - void run (); - - private: - void setup_system (); - void assemble_system (); - void solve (); - void output_results (const unsigned int cycle) const; - - Triangulation triangulation; - FEQ1 fe; - DoFHandler dof_handler; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - Vector solution; - Vector system_rhs; -}; - - - - // In this example, we want to use a - // variable coefficient in the - // elliptic operator. Of course, the - // suitable object is a Function, as - // we have used it for the right hand - // side and boundary values in the - // last example. We will use it - // again, but we implement another - // function ``value_list'' which - // takes a list of points and returns - // the values of the function at - // these points as a list. The reason - // why such a function is reasonable - // although we can get all the - // information from the ``value'' - // function as well will be explained - // below when assembling the matrix. -template -class Coefficient : public Function -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; - - virtual void value_list (const vector > &points, - vector &values, - const unsigned int component = 0) const; -}; - - - - // This is the implementation of the - // coefficient function for a single - // point. We let it return 20 if the - // distance to the point of origin is - // less than 0.5, and 1 otherwise: -template -double Coefficient::value (const Point &p, - const unsigned int) const -{ - if (p.square() < 0.5*0.5) - return 20; - else - return 1; -}; - - - - // And this is the function that - // returns the value of the - // coefficient at a whole list of - // points at once. Of course, the - // values are the same as if we would - // ask the ``value'' function. -template -void Coefficient::value_list (const vector > &points, - vector &values, - const unsigned int component) const -{ - // Use n_q_points as an - // abbreviation for the number of - // points for which function values - // are requested: - const unsigned int n_points = points.size(); - - // Now, of course the size of the - // output array (``values'') must - // be the same as that of the input - // array (``points''), and we could - // simply assume that. However, in - // practice more than 90 per cent - // of programming errors are - // invalid function parameters such - // as invalid array sizes, etc, so - // we should try to make sure that - // the parameters are valid. For - // this, the Assert macro is a good - // means, since it asserts that the - // condition which is given as - // first argument is valid, and if - // not throws an exception (its - // second argument) which will - // usually terminate the program - // giving information where the - // error occured and what the - // reason was. This generally - // reduces the time to find - // programming errors dramatically - // and we have found assertions an - // invaluable means to program - // fast. 
- // - // On the other hand, all these - // checks (there are more than 2000 - // of them in the library) should - // not slow down the program too - // much, which is why the Assert - // macro is only used in debug mode - // and expands to nothing if in - // optimized mode. Therefore, while - // you test your program and debug - // it, the assertions will tell you - // where the problems are, and once - // your program is stable you can - // switch off debugging and the - // program will run without the - // assertions and at maximum speed. - // - // Here, as has been said above, we - // would like to make sure that the - // size of the two arrays is equal, - // and if not throw an - // exception. Since the following - // test is rather frequent for the - // classes derived from - // ``Function'', that class - // declares an exception - // ``ExcVectorHasWrongSize'' which - // takes the sizes of two vectors - // and prints some output in case - // the condition is violated: - Assert (values.size() == n_points, - ExcVectorHasWrongSize (values.size(), n_points)); - // Since examples are not very good - // if they do not demonstrate their - // point, we will show how to - // trigger this exception at the - // end of the main program, and - // what output results from this - // (see the ``Results'' section of - // this example program). You will - // certainly notice that the output - // is quite well suited to quickly - // find what the problem is and - // what parameters are expected. An - // additional plus is that if the - // program is run inside a - // debugger, it will stop at the - // point where the exception is - // triggered, so you can go up the - // call stack to immediately find - // the place where the the array - // with the wrong size was set up. - - // While we're at it, we can do - // another check: the coefficient - // is a scalar, but the Function - // class also represents - // vector-valued function. A scalar - // function must therefore be - // considered as a vector-valued - // function with only one - // component, so the only valid - // component for which a user might - // ask is zero (we always count - // from zero). The following - // assertion checks this. (The - // ``1'' is denotes the number of - // components that this function - // has.) - Assert (component == 0, - ExcWrongComponent (component, 1)); - - for (unsigned int i=0; i -LaplaceProblem::LaplaceProblem () : - dof_handler (triangulation) -{}; - - - - // This is the function - // ``make_grid_and_dofs'' from the - // previous example, minus the - // generation of the grid. Everything - // else is unchanged. -template -void LaplaceProblem::setup_system () -{ - dof_handler.distribute_dofs (fe); - - cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << endl; - - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -}; - - - - // As in the previous examples, this - // function is not changed much with - // regard to its functionality, but - // there are still some optimizations - // which we will show. 
For this, it - // is important to note that if - // efficient solvers are used (such - // as the preconditions CG method), - // assembling the matrix and right - // hand side can take a comparable - // time, and it is worth the effort - // to use one or two optimizations at - // some places. - // - // What we will show here is how we - // can avoid calls to the - // shape_value, shape_grad, and - // quadrature_point functions of the - // FEValues object, and in particular - // optimize away most of the virtual - // function calls of the Function - // object. The way to do so will be - // explained in the following, while - // those parts of this function that - // are not changed with respect to - // the previous example are not - // commented on. -template -void LaplaceProblem::assemble_system () -{ - // This time, we will again use a - // constant right hand side - // function, but a variable - // coefficient. The following - // object will be used for this: - const Coefficient coefficient; - - QGauss2 quadrature_formula; - - FEValues fe_values (fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_q_points | - update_JxW_values)); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - vector local_dof_indices (dofs_per_cell); - - // Below, we will ask the - // Coefficient class to compute the - // values of the coefficient at all - // quadrature points on one cell at - // once. For this, we need some - // space to store the values in, - // which we use the following - // variable for: - vector coefficient_values (n_q_points); - - DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_matrix.clear (); - cell_rhs.clear (); - - // As before, we want the - // FEValues object to compute - // the quantities which we told - // him to compute in the - // constructor using the update - // flags. - fe_values.reinit (cell); - // Now, these quantities are - // stored in arrays in the - // FEValues object. Usually, - // the internals of how and - // where they are stored is not - // something that the outside - // world should know, but since - // this is a time critical - // function we decided to - // publicize these arrays a - // little bit, and provide - // facilities to export the - // address where this data is - // stored. - // - // For example, the values of - // shape function j at - // quadrature point q is stored - // in a matrix, of which we can - // get the address as follows - // (note that this is a - // reference to the matrix, - // symbolized by the ampersand, - // and that it must be a - // constant reference, since - // only read-only access is - // granted): - const FullMatrix - & shape_values = fe_values.get_shape_values(); - // Instead of writing - // fe_values.shape_value(j,q) - // we can now write - // shape_values(j,q), i.e. the - // function call needed - // previously for each access - // has been otimized away. - // - // There are alike functions - // for almost all data elements - // in the FEValues class. The - // gradient are accessed as - // follows: - const vector > > - & shape_grads = fe_values.get_shape_grads(); - // The data type looks a bit - // unwieldy, since each entry - // in the matrix (j,q) now - // needs to be the gradient of - // the shape function, which is - // a vector. 
- // - // Similarly, access to the - // place where quadrature - // points and the determinants - // of the Jacobian matrices - // times the weights of the - // respective quadrature points - // are stored, can be obtained - // like this: - const vector - & JxW_values = fe_values.get_JxW_values(); - const vector > - & q_points = fe_values.get_quadrature_points(); - // Admittedly, the declarations - // above are not easily - // readable, but they can save - // many function calls in the - // inner loops and can thus - // make assemblage faster. - // - // An additional advantage is - // that the inner loops are - // simpler to read, since the - // fe_values object is no more - // explicitely needed to access - // the different fields (see - // below). Unfortunately, - // things became a bit - // inconsistent, since the - // shape values are accessed - // via the FullMatrix operator - // (), i.e. using parentheses, - // while all the other fields - // are accessed through vector - // operator [], i.e. using - // brackets. This is due to - // historical reasons and - // frequently leads to a bit of - // confusion, but since the - // places where this happens - // are few in well-written - // programs, this is not too - // big a problem. - - // There is one more thing: in - // this example, we want to use - // a non-constant - // coefficient. In the previous - // example, we have called the - // ``value'' function of the - // right hand side object for - // each quadrature - // point. Unfortunately, that - // is a virtual function, so - // calling it is relatively - // expensive. Therefore, we use - // a function of the Function - // class which returns the - // values at all quadrature - // points at once; that - // function is still virtual, - // but it needs to be computed - // once per cell only, not once - // in the inner loop: - coefficient.value_list (q_points, coefficient_values); - // It should be noted that the - // creation of the - // coefficient_values object is - // done outside the loop over - // all cells to avoid memory - // allocation each time we - // visit a new cell. Contrary - // to this, the other variables - // above were created inside - // the loop, but they were only - // references to memory that - // has already been allocated - // (i.e. they are pointers to - // that memory) and therefore, - // no new memory needs to be - // allocated; in particular, by - // declaring the pointers as - // close to their use as - // possible, we give the - // compiler a better choice to - // optimize them away - // altogether, something which - // it definitely can't do with - // the coefficient_values - // object since it is too - // complicated, but mostly - // because it's address is - // passed to a virtual function - // which is not knows at - // compile time. 
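 // (As an aside, a minimal sketch of the two alternatives discussed
 // above; the variable names are the ones used in this function, but
 // the first variant is hypothetical and not what this program does:
 //
 //     // one virtual function call per quadrature point:
 //     for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
 //       coefficient_values[q_point] = coefficient.value (q_points[q_point]);
 //
 //     // versus a single virtual function call per cell:
 //     coefficient.value_list (q_points, coefficient_values);
 //
 // Both fill the same array; the second form simply moves the virtual
 // dispatch out of the innermost loop.)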
- - // Using the various - // abbreviations, the loops - // then look like this (the - // parentheses around the - // product of the two gradients - // are needed to indicate the - // dot product; we have to - // overrule associativity of - // the operator* here, since - // the compiler would otherwise - // complain about an undefined - // product of double*gradient - // since it parses - // left-to-right): - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; i boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -}; - - - - // The solution process again looks - // mostly like in the previous - // examples. However, we will now use - // a preconditioned conjugate - // gradient algorithm. It is not very - // difficult to make this change: -template -void LaplaceProblem::solve () -{ - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - - // The only thing we have to alter - // is that we need an object which - // will act as a preconditioner. We - // will use SSOR (symmetric - // successive overrelaxation), with - // a relaxation factor of 1.2. For - // this purpose, the SparseMatrix - // class has a function which does - // one SSOR step, and we need to - // package the address of this - // function together with the - // matrix on which it should act - // (which is the matrix to be - // inverted) and the relaxation - // factor into one object. This can - // be done like this: - PreconditionRelaxation<> - preconditioner(system_matrix, - &SparseMatrix::template precondition_SSOR, - 1.2); - // The default template parameters - // of the PreconditionRelaxation - // class are the matrix and the - // vector type, which default to - // the types used in this program. - - // Calling the solver now looks - // mostly like in the example - // before, but where there was an - // object of type - // PreconditionIdentity before, - // there now is the newly generated - // preconditioner object. - cg.solve (system_matrix, solution, system_rhs, - preconditioner); - - cout << " " << solver_control.last_step() - << " CG iterations needed to obtain convergence." - << endl; -}; - - - - // Writing output to a file is mostly - // the same as for the previous - // example, but here we will show how - // to modify some output options and - // how to construct a different - // filename for each refinement - // cycle. -template -void LaplaceProblem::output_results (const unsigned int cycle) const -{ - DataOut data_out; - - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - - data_out.build_patches (); - - // For this example, we would like - // to write the output directly to - // a file in Encapsulated - // Postscript (EPS) format. The - // library supports this, but - // things may be a bit more - // difficult sometimes, since EPS - // is a printing format, unlike - // most other supported formats - // which serve as input for - // graphical tools. Therefore, you - // can't scale or rotate the image - // after it has been written to - // disk, and you have to decide - // about the viewpoint or the - // scaling in advance. 
- // - // The defaults in the library are - // usually quite reasonable, and - // regarding viewpoint and scaling - // they coincide with the defaults - // of Gnuplot. However, since this - // is a tutorial, we will - // demonstrate how to change - // them. For this, we first have to - // generate an object describing - // the flags for EPS output: - DataOutBase::EpsFlags eps_flags; - // They are initialized with the - // default values, so we only have - // to change those that we don't - // like. For example, we would like - // to scale the z-axis differently - // (stretch each data point in - // z-direction by a factor of four): - eps_flags.z_scaling = 4; - // Then we would also like to alter - // the viewpoint from which we look - // at the solution surface. The - // default is at an angle of 60 - // degrees down from the vertical - // axis, and 30 degrees rotated - // against it in mathematical - // positive sense. We raise our - // viewpoint a bit and look more - // along the y-axis: - eps_flags.azimut_angle = 40; - eps_flags.turn_angle = 10; - // That shall suffice. There are - // more flags, for example whether - // to draw the mesh lines, which - // data vectors to use for - // colorization of the interior of - // the cells, and so on. You may - // want to take a look at the - // documentation of the EpsFlags - // structure to get an overview of - // what is possible. - // - // The only thing still to be done, - // is to tell the output object to - // use these flags: - data_out.set_flags (eps_flags); - // The above way to modify flags - // requires recompilation each time - // we would like to use different - // flags. This is inconvenient, and - // we will see more advanced ways - // in following examples where the - // output flags are determined at - // run time using an input file. - - // Finally, we need the filename to - // which the results is to be - // written. We would like to have - // it of the form - // ``solution-N.eps'', where N is - // the number of refinement - // cycle. Thus, we have to convert - // an integer to a part of a - // string; this can be done using - // the ``sprintf'' function, but in - // C++ there is a more elegant way: - // write everything into a special - // stream (just like writing into a - // file or to the screen) and - // retrieve that as a string. This - // applies the usual conversions - // from integer to strings, and one - // could as well give stream - // modifiers such as ``setf'', - // ``setprecision'', and so on. - ostrstream filename; - filename << "solution-" - << cycle - << ".eps"; - // In order to append the final - // '\0', we have to put an ``ends'' - // to the end of the string: - filename << ends; - - // We can get whatever we wrote to - // the stream using the ``str()'' - // function. Use that as filename - // for the output stream: - ofstream output (filename.str()); - // And then write the data to the - // file. - data_out.write_eps (output); -}; - - - -template -void LaplaceProblem::run () -{ - for (unsigned int cycle=0; cycle<6; ++cycle) - { - cout << "Cycle " << cycle << ':' << endl; - - // If this is the first round, - // then we have no grid yet, - // and we will create it - // here. In previous examples, - // we have already used some of - // the functions from the - // GridGenerator class. Here we - // would like to read a grid - // from a file where the cells - // are stored and which may - // originate from someone else, - // or may be the product of a - // mesh generator tool. 
- // - // In order to read a grid from - // a file, we generate an - // object of data type GridIn - // and associate the - // triangulation to it (i.e. we - // tell it to fill our - // triangulation object when we - // ask it to read the - // file). Then we open the - // respective file and fill the - // triangulation with it: - if (cycle == 0) - { - GridIn grid_in; - grid_in.attach_triangulation (triangulation); - - // We would now like to - // read the file. However, - // the input file is only - // for a two-dimensional - // triangulation, while - // this function is a - // template for arbitrary - // dimension. Since this is - // only a demonstration - // program, we will not use - // different input files - // for the different - // dimensions, but rather - // kill the whole program - // if we are not in 2D: - Assert (dim==2, ExcInternalError()); - // ExcInternalError is a - // globally defined - // exception, which may be - // thrown whenever - // something is terribly - // wrong. Usually, one - // would like to use more - // specific exceptions, and - // particular in this case - // one would of course try - // to do something else if - // ``dim'' is not equal to - // two, e.g. create a grid - // using library - // functions. Aborting a - // program is usually not a - // good idea and assertions - // should really only be - // used for exceptional - // cases which should not - // occur, but might due to - // stupidity of the - // programmer, user, or - // someone else. The - // situation above is not a - // very clever use of - // Assert, but again: this - // is a tutorial and it - // might be worth to show - // what not to do, after - // all. - - // We can now actually read - // the grid. It is in UCD - // (unstructured cell data) - // format, as supported by - // AVS Explorer, for - // example: - ifstream input_file("circle-grid.inp"); - grid_in.read_ucd (input_file); - - // The grid in the file - // describes a - // circle. Therefore we - // have to use a boundary - // object which tells the - // triangulation where to - // put new points on the - // boundary when the grid - // is refined. This works - // in the same way as in - // the first example. Note - // that the - // HyperBallBoundary - // constructor takes two - // parameters, the center - // of the ball and the - // radius, but that their - // default (the origin and - // 1.0) are the ones which - // we would like to use - // here. - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); - } - // If this is not the first - // cycle, then simply refine - // the grid once globally. - else - triangulation.refine_global (1); - - // Write some output and do all - // the things that we have - // already seen in the previous - // examples. - cout << " Number of active cells: " - << triangulation.n_active_cells() - << endl - << " Total number of cells: " - << triangulation.n_cells() - << endl; - - setup_system (); - assemble_system (); - solve (); - output_results (cycle); - }; -}; - - - - // The main function looks mostly - // like the one in the previous - // example, so we won't comment on it - // further. -int main () -{ - deallog.depth_console (0); - - LaplaceProblem<2> laplace_problem_2d; - laplace_problem_2d.run (); - - // Finally, we have promised to - // trigger an exception in the - // Coefficient class. For this, we - // have to call its ``value_list'' - // function with two arrays of - // different size (the number in - // parentheses behind the name of - // the object). 
We have commented - // out these lines in order to - // allow the program to exit - // gracefully in normal situations - // (we use the program in - // day-to-day testing of changes to - // the library as well), so you - // will only get the exception by - // un-commenting the following - // lines. -/* - Coefficient<2> coefficient; - vector > points (2); - vector coefficient_values (1); - coefficient.value_list (points, coefficient_values); -*/ - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-6/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-6/.cvsignore deleted file mode 100644 index c5c1cbecff..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-6/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-6 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-6/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-6/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-6/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. 
-# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-6/step-6.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-6/step-6.cc deleted file mode 100644 index 0629bdc820..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-6/step-6.cc +++ /dev/null @@ -1,1035 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 2000 */ - - // The first few files have already - // been covered in previous examples - // and will thus not be further - // commented on. 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - // From the following include file we - // will import the declaration of the - // quadratic finite element class, - // which in analogy to ``FEQ1'' for - // the linear element is called - // ``FEQ2''. The Lagrange elements of - // poynomial degrees one through four - // are all declared in this file. -#include - - // We will not read the grid from a - // file as in the previous example, - // but generate it using a function - // of the library. However, we will - // want to write out the locally - // refined grids in each step, so we - // need the following include file - // instead of ``grid_in.h'': -#include - - // When using locally refined grids, - // we will get so-called ``hanging - // nodes''. However, the standard - // finite element methods assumes - // that the discrete solution spaces - // be continuous, so we need to make - // sure that the degrees of freedom - // on hanging nodes conform to some - // constraints such that the global - // solution is continuous. The - // following file contains a class - // which is used to handle these - // constraints: -#include - - // Finally, we would like to use a - // simple way to adaptively refine - // the grid. While in general, - // adaptivity is very - // problem-specific, the error - // indicator in the following file - // often yields quite nicely adapted - // grids for a wide class of - // problems. -#include - -#include - - - // The main class is again almost - // unchanged. Two additions, however, - // are made: we have added the - // ``refine'' function, which is used - // to adaptively refine the grid - // (instead of the global refinement - // in the previous examples), and a - // variable which will hold the - // constraints associated to the - // hanging nodes. -template -class LaplaceProblem -{ - public: - LaplaceProblem (); - // For educational purposes, we - // add a destructor here. The - // reason why we do so will be - // explained in the definition of - // this function. - ~LaplaceProblem (); - void run (); - - private: - void setup_system (); - void assemble_system (); - void solve (); - void refine_grid (); - void output_results (const unsigned int cycle) const; - - Triangulation triangulation; - DoFHandler dof_handler; - - // In order to use the quadratic - // element, we only have to - // replace the declaration of the - // ``fe'' variable like this: - FEQ2 fe; - - // This is the new variable in - // the main class. 
We need an - // object which holds a list of - // the constraints from the - // hanging nodes: - ConstraintMatrix hanging_node_constraints; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - Vector solution; - Vector system_rhs; -}; - - - -template -class Coefficient : public Function -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; - - virtual void value_list (const vector > &points, - vector &values, - const unsigned int component = 0) const; -}; - - - -template -double Coefficient::value (const Point &p, - const unsigned int) const -{ - if (p.square() < 0.5*0.5) - return 20; - else - return 1; -}; - - - -template -void Coefficient::value_list (const vector > &points, - vector &values, - const unsigned int component) const -{ - const unsigned int n_points = points.size(); - - Assert (values.size() == n_points, - ExcVectorHasWrongSize (values.size(), n_points)); - - Assert (component == 0, - ExcWrongComponent (component, 1)); - - for (unsigned int i=0; i -LaplaceProblem::LaplaceProblem () : - dof_handler (triangulation) -{}; - - - // Here comes the added destructor of - // the class. The reason why we - // needed to do so is a subtle change - // in the order of data elements in - // the class as compared to all - // previous examples: the - // ``dof_handler'' object was defined - // before and not after the ``fe'' - // object. Of course we could have - // left this order unchanged, but we - // would like to show what happens if - // the order is reversed since this - // produces a rather nasty effect and - // results in an error which is - // difficult to track down if one - // does not know what happens. - // - // Basically what happens is the - // following: when we distribute the - // degrees of freedom using the - // function call - // ``dof_handler.distribute_dofs()'', - // the ``dof_handler'' also stores a - // pointer to the finite element in - // use. Since this pointer is used - // every now and then until either - // the degrees of freedom are - // re-distributed using another - // finite element object or until the - // ``dof_handler'' object is - // detroyed, it would be unwise if we - // would allow the finite element - // object to be deleted before - // ``dof_handler'' object. To - // disallow this, the DoF handler - // increases a counter inside the - // finite element object which counts - // how many objects use that finite - // element (this is what the - // ``Subscriptor'' class is used for, - // in case you want something like - // this for your own programs). The - // finite element object will refuse - // its destruction if that counter is - // larger than zero, since then some - // other objects might rely on the - // persistence of the finite element - // object. An exception will then be - // thrown and the program will - // usually abort upon the attempt to - // destroy the finite element. - // - // As a sidenote, we remark that - // these exception are not - // particularly popular among - // programmers, since they only tell - // us that some other object is still - // using the object that is presently - // destructed, but not which one. It - // is therefore often rather - // time-consuming to find out where - // the problem exactly is, although - // it is then usually straightforward - // to remedy the situation. 
However, - // we believe that the effort to find - // invalid references to objects that - // do no longer exist is less if the - // problem is detected once the - // reference becomes invalid, rather - // than when non-existent objects are - // actually accessed again, since - // then usually only invalid data is - // accessed, but no error is - // immediately raised. - // - // Coming back to the present - // situation, if we did not write - // this destructor, the compiler will - // generate code that triggers - // exactly the behavious sketched - // above. The reason is that member - // variables of the - // ``LaplaceProblem'' class are - // destructed bottom-up, as always in - // C++. Thus, the finite element - // object will be destructed before - // the DoF handler object, since its - // declaration is below the one of - // the DoF handler. This triggers the - // situation above, and an exception - // will be raised when the ``fe'' - // object is destructed. What needs - // to be done is to tell the - // ``dof_handler'' object to release - // its lock to the finite element. Of - // course, the ``dof_handler'' will - // only release its lock if it really - // does not need the finite element - // any more, i.e. when all finite - // element related data is deleted - // from it. For this purpose, the - // ``DoFHandler'' class has a - // function ``clear'' which deletes - // all degrees of freedom, releases - // its lock to the finite element and - // sets its internal pointer to a - // null pointer. After this, you can - // safely destruct the finite element - // object since its internal counter - // is then zero. - // - // For completeness, we add the - // output of the exception that would - // be triggered without this - // destructor to the end of the - // results section of this example. -template -LaplaceProblem::~LaplaceProblem () -{ - dof_handler.clear (); -}; - - - -template -void LaplaceProblem::setup_system () -{ - // To distribute degrees of - // freedom, the ``dof_handler'' - // variable takes only the finite - // element object. In this case, it - // will distribute one degree of - // freedom per vertex, one per line - // and one in the interior of the - // cell. You need not specify these - // details since they are encoded - // into the finite element object - // from which the ``dof_handler'' - // gets the necessary information. - dof_handler.distribute_dofs (fe); - - // After setting up all the degrees - // of freedoms, we can make up the - // list of constraints associated - // with the hanging nodes. This is - // done using the following - // function calls (the first clears - // the contents of the object, - // which is still there from the - // previous cycle, i.e. before the - // grid was refined): - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - // In principle, the - // ConstraintMatrix class can hold - // other constraints as well, - // i.e. constraints that do not - // stem from hanging - // nodes. Sometimes, it is useful - // to use such constraints, in - // which case they may be added to - // the ConstraintMatrix object - // after the hanging node - // constraints were computed. After - // all constraints have been added, - // they need to be sorted and - // rearranged to perform some - // actions more efficiently. This - // postprocessing is done using the - // ``close'' function, after which - // no further constraints may be - // added any more. 
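 // (As a brief summary of the life cycle of this constraints object in
 // each refinement cycle -- a sketch only, since the individual calls
 // are spread over several functions of this class: ``clear'' forgets
 // the constraints belonging to the previous grid,
 // ``make_hanging_node_constraints'' computes the new ones, ``close''
 // sorts and finalizes them, ``condense'' eliminates the constrained
 // degrees of freedom from the sparsity pattern, matrix and right hand
 // side, and after the linear system has been solved, ``distribute''
 // computes the values of the constrained nodes from those of the
 // unconstrained ones.)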
- hanging_node_constraints.close (); - - // Since we use higher order finite - // elements, the maximum number of - // entries per line of the matrix - // is larger than for the linear - // elements. The - // ``max_couplings_between_dofs()'' - // function takes care of this: - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - - // The constrained hanging nodes - // will later be eliminated from - // the linear system of - // equations. When doing so, some - // additional entries in the global - // matrix will be set to non-zero - // values, so we have to reserve - // some space for them here. Since - // the process of elimination of - // these constrained nodes is - // called ``condensation'', the - // functions that eliminate them - // are called ``condense'' for both - // the system matrix and right hand - // side, as well as for teh - // sparsity pattern. - hanging_node_constraints.condense (sparsity_pattern); - - // Now all non-zero entries of the - // matrix are known (i.e. those - // from regularly assembling the - // matrix and those that were - // introduced by eliminating - // constraints). We can thus close - // the sparsity pattern and remove - // unneeded space: - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -}; - - - -template -void LaplaceProblem::assemble_system () -{ - const Coefficient coefficient; - // Since we use a higher order - // finite element, we also need to - // adjust the order of the - // quadrature formula in order to - // integrate the matrix entries - // with sufficient accuracy. For - // the quadratic polynomials of - // which the finite element which - // we use consist, a Gauss formula - // with three points in each - // direction is sufficient. - QGauss3 quadrature_formula; - - // The ``FEValues'' object - // automatically adjusts the - // computation of values to the - // finite element. In fact, the - // ``FEValues'' class does not do - // many computations itself, but - // mostly delegates its work to the - // finite element class to which - // its first parameter - // belongs. That class then knows - // how to compute the values of - // shape functions, etc. - FEValues fe_values (fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_q_points | - update_JxW_values)); - - // Here it comes handy that we have - // introduced an abbreviation for - // the number of degrees of freedom - // per cell before: the following - // value will be set to 9 (in 2D) - // now, where it was 4 before. - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - vector local_dof_indices (dofs_per_cell); - - vector coefficient_values (n_q_points); - - // We can now go on with assembling - // the matrix and right hand - // side. Note that this code is - // copied without change from the - // previous example, even though we - // are now using another finite - // element. The actual difference - // in what is done is inside the - // call to ``fe_values.reinit - // (cell)'', but you need not care - // about what happens there. For - // the user of the ``fe_values'' - // object, the actual finite - // element type is transparent. 
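 // (A quick check of the two numbers mentioned earlier in this
 // function, stated here as an aside: the quadratic Lagrange element
 // in 2d has one degree of freedom per vertex, one per line and one in
 // the cell interior, i.e. 4 + 4 + 1 = 9 local degrees of freedom; and
 // an n-point Gauss formula integrates one-dimensional polynomials up
 // to degree 2n-1 exactly, so the three-point formula used here is
 // exact up to degree 5.)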
- DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_matrix.clear (); - cell_rhs.clear (); - - fe_values.reinit (cell); - const FullMatrix - & shape_values = fe_values.get_shape_values(); - const vector > > - & shape_grads = fe_values.get_shape_grads(); - const vector - & JxW_values = fe_values.get_JxW_values(); - const vector > - & q_points = fe_values.get_quadrature_points(); - - coefficient.value_list (q_points, coefficient_values); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; i boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -}; - - - -template -void LaplaceProblem::solve () -{ - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - - PreconditionRelaxation<> - preconditioner(system_matrix, - &SparseMatrix::template precondition_SSOR, - 1.2); - - cg.solve (system_matrix, solution, system_rhs, - preconditioner); - - // To set the constrained nodes to - // resonable values, you have to - // use the following function. It - // computes the values of these - // nodes from the values of the - // unconstrained nodes, which are - // the solutions of the linear - // system just solved. - hanging_node_constraints.distribute (solution); -}; - - - // Instead of global refinement, we - // now use a slightly more elaborate - // scheme. We will use the - // ``KellyErrorEstimator'' class - // which implements an error - // estimator for the Laplace - // equation; it can in principle - // handle variable coefficients, but - // we will not use these advanced - // features, but rather use its most - // simple form since we are not - // interested in quantitative results - // but only in a quick way to - // generate locally refined grids. - // - // Although the error estimator - // derived by Kelly et al. was - // originally developed for Laplace's - // equation, we have found that it is - // also well suited to quickly - // generate locally refined grids for - // a wide class of - // problems. Basically, it looks at - // the jumps of the gradients of the - // solution over the faces of cells - // (which is a measure for the second - // derivatives) and scales it by the - // size of the cell. It is therefore - // a measure for the local smoothness - // of the solution at the place of - // each cell and it is thus - // understandable that it yields - // reasonable grids also for - // hyperbolic transport problems or - // the wave equation as well, - // although these grids are certainly - // suboptimal compared to approaches - // specially tailored to the - // problem. This error estimator may - // therefore be understood as a quick - // way to test an adaptive program. -template -void LaplaceProblem::refine_grid () -{ - // The output of the error - // estimator class is an error - // indicator for each cell. We - // therefore need a vector with as - // many elements as there are - // active cells. Since accuracy is - // not that important here, the - // data type for the error values - // on each cell is ``float'' - // instead of ``double''. - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - // Next, the error estimator can - // handle Neumann boundary - // conditions. 
For this, it needs - // to know which parts of the - // boundary have Neumann boundary - // conditions and teh respective - // boundary values there. This - // information is mediated by a map - // in which the keys are the - // boundary part numbers and the - // values are pointers to the - // boundary value functions. We - // create such a map, but since we - // do not use Neumann boundary - // conditions, the map will not - // contain entries. - KellyErrorEstimator::FunctionMap neumann_boundary; - - // Now we call the error - // estimator. The parameters should - // be clear apart from the - // quadrature formula: as said - // above, the jump of the gradients - // of the solution across the faces - // of a cell are considered. They - // are integrated along the face, - // but as usual in finite element - // programs the integration is done - // using quadrature. Since the - // error estimator class can't know - // itself which quadrature formula - // might be appropriate, we have to - // pass one to the function (of - // course, the order of the - // quadrature formula should be - // adapted to the finite element - // under consideration). Note that - // since the quadrature has to take - // place along faces, the dimension - // of the quadrature formula is - // ``dim-1'' rather then ``dim''. - // - // (What constitutes a suitable - // quadrature rule here of course - // depends on knowledge of the way - // the error estimator evaluates - // the solution field. As said - // above, the jump of the gradient - // is integrated over each face, - // which would be a quadratic - // function on each face for the - // quadratic elements in use in - // this example. In fact, however, - // it is the square of the jump of - // the gradient, as explained in - // the documentation of that class, - // and that is a quartic function, - // for which a 3 point Gauss - // formula is sufficient since it - // integrates polynomials up to - // order 5 exactly.) - KellyErrorEstimator::estimate (dof_handler, - QGauss3(), - neumann_boundary, - solution, - estimated_error_per_cell); - - // The above function returned one - // error indicator value for each - // cell in the - // ``estimated_error_per_cell'' - // array. Refinement is now done as - // follows: refine those 30 per - // cent of the cells with the - // highest error values, and - // coarsen the 3 per cent of cells - // with the lowest values. - // - // One can easily verify that if - // the second number were zero, - // this would approximately result - // in a doubling of cells in each - // step in two space dimensions, - // since for each of the 30 per - // cent of cells four new would be - // replaced. In practice, some more - // cells are usually produced since - // it is disallowed that a cell is - // refined twice while the neighbor - // cell is not refined; in that - // case, the neighbor cell would be - // refined as well. - // - // In many applications, the number - // of cells to be coarsened would - // be set to something larger than - // only three per cent. A non-zero - // value is useful especially if - // for some reason the initial - // (coarse) grid is already rather - // refined. In that case, it might - // be necessary to refine it in - // some regions, while coarsening - // in some other regions is - // useful. In our case here, the - // initial grid is very coarse, so - // coarsening is only necessary in - // a few regions where - // over-refinement may have taken - // place. Thus a small, non-zero - // value is appropriate here. 
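 // (To make the "doubling" argument above concrete: if N cells are
 // active and the 30 per cent of them with the largest error
 // indicators are refined while none are coarsened, then in 2d each
 // refined cell is replaced by four children, giving approximately
 // 0.7*N + 4*0.3*N = 1.9*N cells, i.e. roughly twice as many as
 // before.)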
- triangulation.refine_and_coarsen_fixed_number (estimated_error_per_cell, - 0.3, 0.03); - - // After the previous function has - // exited, some cells are flagged - // for refinement, and some other - // for coarsening. The refinement - // or coarsening itself is not - // performed by now, however, since - // there are many cases where - // further modifications of these - // flags is useful. Here, we don't - // want to do any such thing, so we - // can tell the triangulation to - // perform the actions for which - // the cells are flagged. - triangulation.execute_coarsening_and_refinement (); -}; - - - -template -void LaplaceProblem::output_results (const unsigned int cycle) const -{ - // We want to write the grid in - // each cycle. Here is another way - // to quickly produce a filename - // based on the cycle number. It - // assumes that the numbers `0' - // through `9' are represented - // consecutively in the character - // set (which is the case in all - // known character sets). However, - // this will only work if the cycle - // number is less than ten, which - // we check by an assertion. - string filename = "grid-"; - filename += ('0' + cycle); - Assert (cycle < 10, ExcInternalError()); - - filename += ".eps"; - ofstream output (filename.c_str()); - - // Using this filename, we write - // each grid as a postscript file. - GridOut grid_out; - grid_out.write_eps (triangulation, output); -}; - - - -template -void LaplaceProblem::run () -{ - for (unsigned int cycle=0; cycle<8; ++cycle) - { - cout << "Cycle " << cycle << ':' << endl; - - if (cycle == 0) - { - // Instead of reading the - // grid from a file on disk - // as in the previous - // example, we now again - // create it using a - // library function. The - // domain is again a - // circle, which is why we - // have to provide a - // suitable boundary object - // as well. - // - // You will notice by - // looking at the coarse - // grid that it is of - // inferior quality than - // the one which we read - // from the file in the - // previous example: the - // cells are less equally - // formed. However, using - // the library function - // this program works in - // any space dimension, - // which was not the case - // before. - GridGenerator::hyper_ball (triangulation); - - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); - - triangulation.refine_global (1); - } - else - // In case this is not the - // first cycle, we want to - // refine the grid. Unlike - // the global refinement - // employed in the last - // example, we now use the - // adaptive procedure - // described in the function - // which we now call: - { - refine_grid (); - }; - - - cout << " Number of active cells: " - << triangulation.n_active_cells() - << endl; - - setup_system (); - - cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << endl; - - assemble_system (); - solve (); - output_results (cycle); - }; - - // The solution on the final grid - // is now written to a file. As - // already done in one of the - // previous examples, we use the - // EPS format for output, and to - // obtain a reasonable view on the - // solution, we rescale the z-axis - // by a factor of four. 
- DataOut::EpsFlags eps_flags; - eps_flags.z_scaling = 4; - - DataOut data_out; - data_out.set_flags (eps_flags); - - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - data_out.build_patches (); - - ofstream output ("final-solution.eps"); - data_out.write_eps (output); -}; - - - // The main function is unaltered in - // its functionality against the - // previous example, but we have - // taken a step of additional - // caution. Sometimes, something goes - // wrong (such as insufficient disk - // space upon writing an output file, - // not enough memory when trying to - // allocate a vector or a matrix, or - // if we can't read from or write to - // a file for whatever reason), and - // in these cases the library will - // throw exceptions. Since they do - // not constitute programming errors, - // these exceptions also are not - // switched off in optimized mode, in - // constrast to the ``Assert'' macro - // which we have used to test against - // programming errors. If uncought, - // these exceptions propagate the - // call tree up to the ``main'' - // function, and if they are not - // caught there either, the program - // is aborted. In many cases, like if - // there is not enough memory or disk - // space, we can't do anything but we - // can at least print some text - // trying to explain the reason why - // the program failed. A way to do so - // is shown in the following. It is - // certainly useful to write any - // larger program in this way, and - // you can do so by more or less - // copying this function apart from - // the ``try'' block which contains - // the code that constitutes the - // actual functionality. -int main () -{ - - // The general idea behind the - // layout of this function is as - // follows: let's try to run the - // program as we did before... - try - { - deallog.depth_console (0); - - LaplaceProblem<2> laplace_problem_2d; - laplace_problem_2d.run (); - } - // ...and if this should fail, try - // to gather as much information as - // possible. Specifically, if the - // exception that was thrown is an - // object of a class that is - // derived from the C++ standard - // class ``exception'', then we can - // use the ``what'' member function - // to get a string which describes - // the reason why the exception was - // thrown. - // - // The deal.II exception classes - // are all derived from the - // standard class, and in - // particular, the ``exc.what()'' - // function will return - // approximately the same string as - // would be generated if the - // exception was thrown using the - // ``Assert'' macro. You have seen - // the output of such an exception - // in the previous example, and you - // then know that it contains the - // file and line number of where - // the exception occured, and some - // other information. This is also - // what would be printed in the - // following. - catch (exception &exc) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Exception on processing: " << endl - << exc.what() << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - // We can't do much more than - // printing as much information - // as we can get to, so abort - // with error: - return 1; - } - // If the exception that was thrown - // somewhere was not an object of a - // class derived from the standard - // ``exception'' class, then we - // can't do anything at all. 
We - // then simply print an error - // message and exit. - catch (...) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Unknown exception!" << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - return 1; - }; - - // If we got to this point, there - // was no exception which - // propagated up to the main - // functino (maybe there were some, - // but they were caught somewhere - // in the program or the - // library). Therefore, the program - // performed as was expected and we - // can return without error. - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-7/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-7/.cvsignore deleted file mode 100644 index 53721fd585..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-7/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-7 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-7/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-7/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-7/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. -target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. 
-# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-7/step-7.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-7/step-7.cc deleted file mode 100644 index db7a49f68a..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-7/step-7.cc +++ /dev/null @@ -1,1242 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 2000 */ - - // These first include files have all - // been treated in previous examples, - // so we won't explain what is in - // them again. 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - // In this example, we will not use - // the numeration scheme which is - // used per default by the - // ``DoFHandler'' class, but will - // renumber them using the - // Cuthill-McKee algorithm. The - // necessary functions are declared - // in the following file: -#include - // Then we will show a little trick - // how we can make sure that objects - // are not deleted while they are - // still in use. For this purpose, - // there is the ``SmartPointer'' - // helper class, which is declared in - // this file: -#include - // Then we will want to use the - // ``integrate_difference'' function - // mentioned in the introduction. It - // comes from this file: -#include - // And finally, we need to use the - // ``FEFaceValues'' class, which is - // declare in the same file as the - // ``FEValues'' class: -#include - -#include - - - - // Since we want to compare the - // exactly known continuous solution - // to the computed one, we need a - // function object which represents - // the continuous solution. On the - // other hand, we need the right hand - // side function, and that one of - // course shares some characteristics - // with the solution. In order to - // reduce dependencies which arise if - // we have to change something in - // both classes at the same time, we - // exclude the common characteristics - // of both functions into a base - // class. - // - // The common characteristics for the - // given solution, which as explained - // in the introduction is a sum of - // three exponentials, are here: the - // number of exponentials, their - // centers, and their half width. We - // declare them in the following - // class. Since the number of - // exponentials is a constant scalar - // integral quantity, C++ allows its - // definition (i.e. assigning a - // value) right at the place of - // declaration (i.e. where we declare - // that such a variable exists). -template -class SolutionBase -{ - protected: - static const unsigned int n_source_centers = 3; - static const Point source_centers[n_source_centers]; - static const double width; -}; - - - // The variables which denote the - // centers and the width of the - // exponentials have just been - // declared, now we still need to - // assign values to them. Here, we - // can show another small piece of - // template sourcery, namely how we - // can assign different values to - // these variables depending on the - // dimension. We will only use the 2d - // case in the program, but we show - // the 1d case for exposition of a - // useful technique. - // - // First we assign values to the - // centers for the 1d case, where we - // place the centers equidistanly at - // -1/3, 0, and 1/3: -template <> -const Point<1> -SolutionBase<1>::source_centers[SolutionBase<1>::n_source_centers] -= { Point<1>(-1.0 / 3.0), - Point<1>(0.0), - Point<1>(+1.0 / 3.0) }; - - // Then we place the centers for the - // 2d case as follows: -template <> -const Point<2> -SolutionBase<2>::source_centers[SolutionBase<2>::n_source_centers] -= { Point<2>(-0.5, +0.5), - Point<2>(-0.5, -0.5), - Point<2>(+0.5, -0.5) }; - - // There remains to assign a value to - // the half-width of the - // exponentials. 
We would like to use - // the same value for all dimensions, - // so here is how that works: -template -const double SolutionBase::width = 1./3.; - - - - // After declaring and defining the - // characteristics of solution and - // right hand side, we can declare - // the classes representing these - // two. They both represent - // continuous functions, so they are - // derived from the ``Function'' - // base class, and they also inherit - // the characteristics defined in the - // ``SolutionBase'' class. - // - // The actual classes are declared in - // the following. Note that in order - // to compute the error of the - // numerical solution against the - // continuous one in the L2 and H1 - // norms, we have to export value and - // gradient of the exact solution, - // which is done by overloading the - // respective virtual member - // functions in the ``Function'' base - // class. -template -class Solution : public Function, - protected SolutionBase -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; - virtual Tensor<1,dim> gradient (const Point &p, - const unsigned int component = 0) const; -}; - - - // The actual definition of the - // values and gradients of the exact - // solution class is according to - // their mathematical definition and - // probably needs not much - // explanation. -template -double Solution::value (const Point &p, - const unsigned int) const -{ - double return_value = 0; - for (unsigned int i=0; i shifted_point = p-source_centers[i]; - - // The ``Point'' class - // offers a member function - // ``square'' that does what - // it's name suggests. - return_value += exp(-shifted_point.square() / (width*width)); - }; - - return return_value; -}; - - - -template -Tensor<1,dim> Solution::gradient (const Point &p, - const unsigned int) const -{ - // In order to accumulate the - // gradient from the contributions - // of the exponentials, we allocate - // an object which denotes the - // mathematical quantity of a - // tensor of rank ``1'' and - // dimension ``dim''. Its default - // constructor sets it to the - // vector containing only zeroes, - // so we need not explicitely care - // for its initialization. - Tensor<1,dim> return_value; - // Note that we could as well have - // taken the type of the object to - // be ``Point''. Tensors of - // rank 1 and points are almost - // exchangeable, and have only very - // slightly different mathematical - // meanings. In fact, the - // ``Point'' class is derived - // from the ``Tensor<1,dim>'' - // class, which makes up for their - // mutual exchangeability. - - for (unsigned int i=0; i shifted_point = p-source_centers[i]; - - // For the gradient, note that - // it's direction is along - // (x-x_i), so we add up - // multiples of this distance - // vector, where the factor is - // given by the exponentials. - return_value += (-2 / (width*width) * - exp(-shifted_point.square() / (width*width)) * - shifted_point); - }; - - return return_value; -}; - - - - // Besides the function that - // represents the exact solution, we - // also need a function which we can - // use as right hand side when - // assembling the linear system of - // discretized equations. This is - // accomplished using the following - // class and the following definition - // of its function. Note that here we - // only need the value of the - // function, not its gradients or - // higher derivatives. 
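As a quick cross-check of the right hand side assembled further below, assuming the Helmholtz operator -\Delta u + u and the sum-of-exponentials solution defined above, a short derivation sketch:

    u(x) = \sum_i \exp(-|x-x_i|^2 / w^2),
    -\Delta \exp(-|x-x_i|^2 / w^2) = \frac{2\,d - 4\,|x-x_i|^2/w^2}{w^2}\,\exp(-|x-x_i|^2 / w^2),
    f(x) = -\Delta u(x) + u(x) = \sum_i \left(\frac{2\,d - 4\,|x-x_i|^2/w^2}{w^2} + 1\right)\exp(-|x-x_i|^2 / w^2),

which is exactly the expression that ``RightHandSide::value'' accumulates term by term below.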
-template -class RightHandSide : public Function, - protected SolutionBase -{ - public: - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - // The value of the right hand side - // is given by the negative Laplacian - // of the solution plus the solution - // itself, since we wanted to solve - // Helmholtz's equation: -template -double RightHandSide::value (const Point &p, - const unsigned int) const -{ - double return_value = 0; - for (unsigned int i=0; i shifted_point = p-source_centers[i]; - - // The first contribution is - // the Laplacian: - return_value += ((2*dim - 4*shifted_point.square()/(width*width)) / - (width*width) * - exp(-shifted_point.square() / (width*width))); - // And the second is the - // solution itself: - return_value += exp(-shifted_point.square() / (width*width)); - }; - - return return_value; -}; - - - - // Then we need the class that does - // all the work. -//....................... -template -class LaplaceProblem -{ - public: -//......... - enum RefinementMode { - global_refinement, adaptive_refinement - }; - -//....... - LaplaceProblem (const FiniteElement &fe, - const RefinementMode refinement_mode); - ~LaplaceProblem (); - - void run (); - - private: -//....... - void setup_system (); - void assemble_system (); - void solve (); - void refine_grid (); - void process_solution (const unsigned int cycle) const; - - Triangulation triangulation; - DoFHandler dof_handler; - - // The finite elements which the - // objects of this class operate - // on are passed to the - // constructor of this class. It - // has to store a pointer to the - // finite element for the member - // functions to use. Now, for the - // present class there is no big - // deal in that, but since we - // want to show techniques rather - // than solutions in these - // programs, we will here point - // out a problem that often - // occurs -- and of course the - // right solution as well. - // - // Consider the following - // situation that occurs in all - // the example programs: we have - // a triangulation object, and we - // have a finite element object, - // and we also have an object of - // type ``DoFHandler'' that uses - // both of the first two. These - // three objects all have a - // lifetime that is rather long - // compared to most other - // objects: they are basically - // set at the beginning of the - // program or an outer loop, and - // they are destroyed at the very - // end. The question is: can we - // guarantee that the two objects - // which the ``DoFHandler'' uses, - // live at least as long as they - // are in use? This means that - // the ``DoFHandler'' must have a - // kind of lock on the - // destruction of the other - // objects, and it can only - // release this lock once it has - // cleared all active references - // to these objects. We have seen - // what happens if we violate - // this order of destruction in - // the previous example program: - // an exception is thrown that - // terminates the program in - // order to notify the programmer - // of this potentially dangerous - // state where an object is - // pointed to that no longer - // persists. - // - // We will show here how the - // library managed to find out - // that there are still active - // references to an - // object. Basically, the method - // is along the following line: - // all objects that are subject - // to such potentially dangerous - // pointers are derived from a - // class called - // ``Subscriptor''. 
For example, - // the ``Triangulation'', - // ``DoFHandler'', and a base - // class of the ``FiniteElement'' - // class are derived from - // ``Subscriptor``. This latter - // class does not offer much - // functionality, but it has a - // built-in counter which we can - // subscribe to, thus the name of - // the class. Whenever we - // initialize a pointer to that - // object, we can increase it use - // counter, and when we move away - // our pointer or do not need it - // any more, we decrease the - // counter again. This way, we - // can always check how many - // objects still use that - // object. If an object of a - // class that is derived from the - // ``Subscriptor'' class is - // destroyed, it also has to call - // the destructor of the - // ``Subscriptor'' class; this - // will then check whether the - // counter is really zero. If - // yes, then there are no active - // references to this object any - // more, and we can safely - // destroy it. If the counter is - // non-zero, however, then the - // destruction would result in - // stale and thus potentially - // dangerous pointers, and we - // rather throw an exception to - // alert the programmer that she - // is doing something dangerous - // and better had her program - // fixed. - // - // While this certainly all - // sounds very well, it has some - // problems in terms of - // usability: what happens if I - // forget to increase the counter - // when I let a pointer point to - // such an object? And what - // happens if I forget to - // decrease it again? Note that - // this may lead to extremely - // difficult to find bugs, since - // the place where we have - // forgotten something may be - // very far away from the place - // where the check for zeroness - // of the counter upon - // destruction actually - // fails. This kind of bug is - // very annoying and usually very - // hard to fix. - // - // The solution to this problem - // is to again use some C++ - // trickery: we create a class - // that acts just like a pointer, - // i.e. can be dereferenced, can - // be assigned to and from other - // pointers, and so on. This can - // be done by overloading the - // several dereferencing - // operators of that - // class. Withing the - // constructors, destructors, and - // assignement operators of that - // class, we can however also - // manage increasing or - // decreasing the use counters of - // the objects we point - // to. Objects of that class - // therefore can be used just - // like ordinary pointers to - // objects, but they also serve - // to change the use counters of - // those objects without the need - // for the programmer to do so - // herself. The class that - // actually does all this is - // called ``SmartPointer'' and - // takes as template parameter - // the data type of the object - // which it shall point to. The - // latter type may be any class, - // as long as it is derived from - // the ``Subscriptor'' class. - // - // In the present example - // program, we protect object - // using the pointer to the - // finite element, i.e. the - // following member variable, - // from the situation that for - // some reason the finite element - // pointed to is destroyed while - // still in use. Note that the - // pointer is assigned at - // construction time of this - // object, and destroyed upon - // destruction of this object, so - // the lock on the destruction of - // the finite element object is - // basically all through the - // lifetime of this object. 
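A minimal sketch of the use-counter idea described above, in plain C++ and with hypothetical names; it only illustrates the principle, not the library's actual ``Subscriptor''/``SmartPointer'' interface:

    #include <cassert>
    #include <cstddef>

    // Stand-in for the counting base class: an object knows how many
    // handles currently refer to it and refuses to die while that
    // number is nonzero.
    class CountedObject
    {
      public:
        CountedObject () : counter (0) {}
        ~CountedObject () { assert (counter == 0); }  // still referenced?
        void subscribe ()   { ++counter; }
        void unsubscribe () { --counter; }
      private:
        std::size_t counter;
    };

    // Stand-in for the pointer-like handle: it adjusts the counter in
    // its constructor and destructor, so users never have to do so by
    // hand. (Copying handles is omitted here for brevity; a real handle
    // class would have to adjust the counter there as well.)
    template <typename T>
    class CountingPointer
    {
      public:
        CountingPointer (T *t) : t (t) { if (t) t->subscribe (); }
        ~CountingPointer ()            { if (t) t->unsubscribe (); }
        T & operator* () const  { return *t; }
        T * operator-> () const { return t; }
      private:
        T *t;
    };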
- SmartPointer > fe; - - // The next few member variables - // are unspectacular, since they - // have already been discussed in - // detail: - ConstraintMatrix hanging_node_constraints; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - Vector solution; - Vector system_rhs; -//............. - RefinementMode refinement_mode; -}; - - - -//........ -template -LaplaceProblem::LaplaceProblem (const FiniteElement &fe, - const RefinementMode refinement_mode) : - dof_handler (triangulation), - fe (&fe), - refinement_mode (refinement_mode) -{}; - - - -template -LaplaceProblem::~LaplaceProblem () -{ - dof_handler.clear (); -}; - - - // The following function sets up the - // degrees of freedom, sizes of - // matrices and vectors, etc. Most of - // its functionality has been showed - // in previous examples, the only - // difference being the renumbering - // step. -template -void LaplaceProblem::setup_system () -{ - dof_handler.distribute_dofs (*fe); - // Renumbering the degrees of - // freedom is not overly difficult, - // as long as you use one of the - // algorithms included in the - // library. It requires just one - // line of code, namely the - // following: - DoFRenumbering::Cuthill_McKee (dof_handler); - // Note, however, that when you - // renumber the degrees of freedom, - // you must do so immediately after - // distributing them, since such - // things as hanging nodes, the - // sparsity pattern etc. depend on - // the absolute numbers which are - // altered by renumbering. - // - // Renumbering does not serve any - // specific purpose in this - // example, it is done only for - // exposition of the technique. To - // see the effect of renumbering on - // the sparsity pattern of the - // matrix, refer to the second - // example program. - - // The rest of the function is - // almost identitcally taken over - // from previous examples: - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); - - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - hanging_node_constraints.condense (sparsity_pattern); - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -}; - - - - // Assembling the system of equations - // for the problem at hand is mostly - // as for the example programs - // before. However, some things have - // changed anyway, so we comment on - // this function fairly extensively. -template -void LaplaceProblem::assemble_system () -{ - // First we need to define objects - // which will be used as quadrature - // formula for domain and face - // integrals. - // - // Note the way in which we define - // a quadrature rule for the faces: - // it is simply a quadrature rule - // for one dimension less! - QGauss3 quadrature_formula; - QGauss3 face_quadrature_formula; - // For simpler use later on, we - // alias the number of quadrature - // points to local variables: - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - const unsigned int n_face_q_points = face_quadrature_formula.n_quadrature_points; - - // Then we need objects which can - // evaluate the values, gradients, - // etc of the shape functions at - // the quadrature points. 
While it - // seems that it should be feasible - // to do it with one object for - // both domain and face integrals, - // there is a subtle difference - // since the weights in the domain - // integrals include the measure of - // the cell in the domain, while - // the face integral quadrature - // requires the measure of the face - // in a lower-dimensional - // mannifold. Internally these two - // classes are rooted on a common - // base class which does most of - // the work; that, however, is - // something that you need not - // worry about. - // - // For the domain integrals in the - // bilinear form for Helmholtz's - // equation, we need to compute the - // values and gradients, as well as - // the weights at the quadrature - // points. Furthermore, we need the - // quadrature points on the real - // cell (rather than on the unit - // cell) to evaluate the right hand - // side function. - FEValues fe_values (*fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_q_points | - update_JxW_values)); - - // For the face integrals, we only - // need the values of the shape - // functions, as well as the - // weights. We also need the normal - // vectors and quadrature points on - // the real cell since we want to - // determine the Neumann values - // from the exact solution object - // (see below). - FEFaceValues fe_face_values (*fe, face_quadrature_formula, - UpdateFlags(update_values | - update_q_points | - update_normal_vectors | - update_JxW_values)); - - // In order to make programming - // more readable below, we alias - // the number of degrees of freedom - // per cell to a local variable, as - // already done for the number of - // quadrature points above: - const unsigned int dofs_per_cell = fe->dofs_per_cell; - - // Then we need some objects - // already known from previous - // examples: An object denoting the - // right hand side function, its - // values at the quadrature points - // on a cell, the cell matrix and - // right hand side, and the indices - // of the degrees of freedom on a - // cell. - RightHandSide right_hand_side; - vector rhs_values (n_q_points); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - vector local_dof_indices (dofs_per_cell); - - // Then we define an object - // denoting the exact solution - // function. We will use it to - // compute the Neumann values at - // the boundary from it. Usually, - // one would of course do so using - // a separate object, in particular - // since the exact solution is not - // known while the Neumann values - // are prescribed. We will, - // however, be a little bit lazy - // and use what we already have in - // information. Real-life programs - // would to go other ways here, of - // course. - Solution exact_solution; - - // Now for the main loop over all - // cells. This is mostly unchanged - // from previous examples, so we - // only comment on the things that - // have changed. 
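Before the loop itself, a compact sketch of what its innermost quadrature loops accumulate for the Helmholtz form -- a gradient (stiffness) term, a mass term, and the right hand side -- written with the aliases that are set up in the loop below (shape_values, shape_grads, JxW_values, rhs_values); this illustrates the pattern and is not a verbatim copy of the original loop:

    for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
      for (unsigned int i=0; i<dofs_per_cell; ++i)
        {
          // (grad phi_i, grad phi_j) + (phi_i, phi_j), weighted by JxW
          for (unsigned int j=0; j<dofs_per_cell; ++j)
            cell_matrix(i,j) += ((shape_grads[i][q_point] *
                                  shape_grads[j][q_point]
                                  +
                                  shape_values(i,q_point) *
                                  shape_values(j,q_point)) *
                                 JxW_values[q_point]);

          // (f, phi_i), weighted by JxW
          cell_rhs(i) += (shape_values(i,q_point) *
                          rhs_values[q_point] *
                          JxW_values[q_point]);
        };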
- DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_matrix.clear (); - cell_rhs.clear (); - - fe_values.reinit (cell); - const FullMatrix - & shape_values = fe_values.get_shape_values(); - const vector > > - & shape_grads = fe_values.get_shape_grads(); - const vector - & JxW_values = fe_values.get_JxW_values(); - const vector > - & q_points = fe_values.get_quadrature_points(); - - right_hand_side.value_list (q_points, rhs_values); - - for (unsigned int q_point=0; q_point::faces_per_cell; ++face) - if (cell->face(face)->boundary_indicator() == 1) - { - // If we came into here, - // then we have found an - // external face - // belonging to - // Gamma2. Next, we have - // to compute the values - // of the shape functions - // and the other - // quantities which we - // will need for the - // computation of the - // contour integral. This - // is done using the - // ``reinit'' function - // which we already know - // from the ``FEValue'' - // class: - fe_face_values.reinit (cell, face); - - // Then, for simpler - // access, we alias the - // various quantities to - // local variables: - const FullMatrix - & face_shape_values = fe_face_values.get_shape_values(); - const vector - & face_JxW_values = fe_face_values.get_JxW_values(); - const vector > - & face_q_points = fe_face_values.get_quadrature_points(); - const vector > - & face_normal_vectors = fe_face_values.get_normal_vectors (); - - // And we can then - // perform the - // integration by using a - // loop over all - // quadrature points. - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; i boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - Solution(), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -}; - - - // Solving the system of equations is - // done in the same way as before. -template -void LaplaceProblem::solve () -{ - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - - PreconditionRelaxation<> - preconditioner(system_matrix, - &SparseMatrix::template precondition_SSOR, - 1.2); - - cg.solve (system_matrix, solution, system_rhs, - preconditioner); - - hanging_node_constraints.distribute (solution); -}; - - -//..................... -template -void LaplaceProblem::refine_grid () -{ - switch (refinement_mode) - { - case global_refinement: - { - triangulation.refine_global (1); - break; - }; - - case adaptive_refinement: - { - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - KellyErrorEstimator::FunctionMap neumann_boundary; - KellyErrorEstimator::estimate (dof_handler, - QGauss3(), - neumann_boundary, - solution, - estimated_error_per_cell); - - triangulation.refine_and_coarsen_fixed_number (estimated_error_per_cell, - 0.3, 0.03); - - triangulation.execute_coarsening_and_refinement (); - - break; - }; - }; -}; - -//............... 
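The call to ``refine_and_coarsen_fixed_number'' above flags roughly the 30% of cells with the largest error indicators for refinement and the 3% with the smallest for coarsening. A small stand-alone sketch of the selection idea behind the refinement part, in plain C++ with hypothetical names and independent of the library (the real routine additionally handles the coarsening fraction and ties):

    #include <algorithm>
    #include <cstddef>
    #include <functional>
    #include <limits>
    #include <vector>

    // Return a threshold such that roughly `top_fraction' of the given
    // per-cell error indicators lie at or above it; comparing each
    // cell's indicator against this threshold then flags the desired
    // share of cells.
    double refinement_threshold (std::vector<double> indicators,
                                 const double        top_fraction)
    {
      const std::size_t n_refine
        = static_cast<std::size_t> (top_fraction * indicators.size());
      if (n_refine == 0)
        return std::numeric_limits<double>::max();  // nothing gets flagged

      // Partially sort so that the n_refine largest indicators come first.
      std::nth_element (indicators.begin(),
                        indicators.begin() + n_refine - 1,
                        indicators.end(),
                        std::greater<double>());
      return indicators[n_refine - 1];
    }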
-template -void LaplaceProblem::process_solution (const unsigned int cycle) const -{ - Vector difference_per_cell (triangulation.n_active_cells()); - - VectorTools::integrate_difference (dof_handler, - solution, - Solution(), - difference_per_cell, - QGauss3(), - L2_norm); - const double L2_error = difference_per_cell.l2_norm(); - - VectorTools::integrate_difference (dof_handler, - solution, - Solution(), - difference_per_cell, - QGauss3(), - H1_seminorm); - const double H1_error = difference_per_cell.l2_norm(); - - VectorTools::integrate_difference (dof_handler, - solution, - Solution(), - difference_per_cell, - QGauss3(), - Linfty_norm); - const double Linfty_error = difference_per_cell.linfty_norm(); - - cout << "Cycle " << cycle << ':' - << endl - << " Number of active cells: " - << triangulation.n_active_cells() - << endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << endl; - - cout << " L2 error: " << L2_error << endl - << " H1 error: " << H1_error << endl - << " Linfty error: " << Linfty_error << endl; -}; - - - - // The following function is the main - // one which controls the flow of - // execution. The basic layout is as - // in previous examples: an outer - // loop over successively refined - // grids, and in this loop first - // problem setup, assemblage of the - // linear system, solution, and - // postprocessing. -template -void LaplaceProblem::run () -{ - for (unsigned int cycle=0; cycle<9; ++cycle) - { - // The first action in each - // iteration of the outer loop - // is setting up the grid on - // which we will solve in this - // iteration. In the first - // iteration, the coarsest grid - // is generated, in later - // iterations it is refined, - // for which we call the - // ``refine_grid'' function. - if (cycle == 0) - { - // Setting up the coarse - // grid is done as in - // previous examples: we - // first create an initial - // grid, which is the unit - // square [-1,1]x[-1,1] in - // the present case. Then - // we refine it globally a - // specific number of - // times. - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (1); - - // However, here we have to - // do something else in - // addition: mark those - // faces that belong to the - // different components of - // the boundary, Gamma1 and - // Gamma2. We will use the - // following convention: - // Faces belonging to - // Gamma1 will have the - // boundary indicator ``0'' - // (which is the default, - // so we don't have to set - // it explicitely), and - // faces belonging to - // Gamma2 will use ``1'' as - // boundary indicator. - // - // To set these values, we - // loop over all cells, - // then over all faces of a - // given cell, check - // whether it belongs to - // the boundary Gamma2, and - // if so set its boundary - // indicator to ``1''. - // - // It is worth noting that - // we have to loop over all - // cells here, not only the - // active ones. The reason - // is that upon refinement, - // newly created faces - // inherit the boundary - // indicator of their - // parent face. If we now - // only set the boundary - // indicator for active - // faces, coarsen some - // cells and refine them - // later on, they will - // again have the boundary - // indicator of the parent - // cell which we have not - // modified, instead of the - // one we - // intended. Therefore, we - // have to change the - // boundary indicators of - // all faces on Gamma2, - // irrespective whether - // they are active or not. 
- Triangulation::cell_iterator cell = triangulation.begin (), - endc = triangulation.end(); - for (; cell!=endc; ++cell) - for (unsigned int face=0; face::faces_per_cell; ++face) - if ((cell->face(face)->center()(0) == -1) - || - (cell->face(face)->center()(1) == -1)) - cell->face(face)->set_boundary_indicator (1); - } - else - // If this is not the first - // step, the we call - // ``refine_grid'' to - // actually refine the grid - // according to the - // refinement mode passed to - // the constructor. - refine_grid (); - - // The next steps you already - // know from previous - // examples. This is mostly the - // basic set-up of every finite - // element program: - setup_system (); - - assemble_system (); - solve (); - - // The last step in this chain - // of function calls is usually - // evaluation of the computed - // solution for the quantities - // one is interested in. This - // is done in the following - // function. We pass the number - // of the loop iteration since - // that might be of interest to - // see in the logs which this - // function produces. - process_solution (cycle); - }; - - // After the last iteration we - // output the solution on the - // finest grid. This is done using - // the following sequence of - // statements which you have - // already seen in previous - // examples: - string filename; - switch (refinement_mode) - { - case global_refinement: - filename = "solution-global"; - break; - case adaptive_refinement: - filename = "solution-adaptive"; - break; - default: - Assert (false, ExcInternalError()); - }; - filename += ".gmv"; - - ofstream output (filename.c_str()); - - - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - data_out.build_patches (); - data_out.write_gmv (output); -}; - - -//................. -int main () -{ - try - { - deallog.depth_console (0); - - FEQ1<2> fe; - LaplaceProblem<2> laplace_problem_2d (fe, LaplaceProblem<2>::adaptive_refinement); - laplace_problem_2d.run (); - } - catch (exception &exc) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Exception on processing: " << endl - << exc.what() << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - return 1; - } - catch (...) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Unknown exception!" << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - return 1; - }; - - return 0; -}; diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-8/.cvsignore b/deal.II/deal.II/Attic/examples/step-by-step/step-8/.cvsignore deleted file mode 100644 index 6d34e1c8cc..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-8/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o *.go Makefile.dep *.gnuplot *.gmv *.eps -step-8 diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-8/Makefile b/deal.II/deal.II/Attic/examples/step-by-step/step-8/Makefile deleted file mode 100644 index 445913746c..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-8/Makefile +++ /dev/null @@ -1,167 +0,0 @@ -# $Id$ - - -# For the small projects Makefile, you basically need to fill in only -# four fields. -# -# The first is the name of the application. It is assumed that the -# application name is the same as the base file name of the single C++ -# file from which the application is generated. 
-target = $(basename $(shell echo step-*.cc)) - -# The second field determines whether you want to run your program in -# debug or optimized mode. The latter is significantly faster, but no -# run-time checking of parameters and internal states is performed, so -# you should set this value to `on' while you develop your program, -# and to `off' when running production computations. -debug-mode = on - - -# As third field, we need to give the path to the top-level deal.II -# directory. You need to adjust this to your needs. Since this path is -# probably the most often needed one in the Makefile internals, it is -# designated by a single-character variable, since that can be -# reference using $D only, i.e. without the parentheses that are -# required for most other parameters, as e.g. in $(target). -D = ../../../../ - - -# The last field specifies the names of data and other files that -# shall be deleted when calling `make clean'. Object and backup files, -# executables and the like are removed anyway. Here, we give a list of -# files in the various output formats that deal.II supports. -clean-up-files = *gmv *gnuplot *gpl *eps *pov - - - - -# -# -# Usually, you will not need to change something beyond this point. -# -# -# The next statement tell the `make' program where to find the -# deal.II top level directory and to include the file with the global -# settings -include $D/common/Make.global_options - - -# Since the whole project consists of only one file, we need not -# consider difficult dependencies. We only have to declare the -# libraries which we want to link to the object file, and there need -# to be two sets of libraries: one for the debug mode version of the -# application and one for the optimized mode. Here we have selected -# the versions for 2d. Note that the order in which the libraries are -# given here is important and that your applications won't link -# properly if they are given in another order. -# -# You may need to augment the lists of libraries when compiling your -# program for other dimensions, or when using third party libraries -libs.g = $(lib-deal2-2d.g) \ - $(lib-lac.g) \ - $(lib-base.g) -libs.o = $(lib-deal2-2d.o) \ - $(lib-lac.o) \ - $(lib-base.o) - - -# We now use the variable defined above which switch between debug and -# optimized mode to select the correct compiler flags and the set of -# libraries to link with. Included in the list of libraries is the -# name of the object file which we will produce from the single C++ -# file. Note that by default we use the extension .go for object files -# compiled in debug mode and .o for object files in optimized mode. -ifeq ($(debug-mode),on) - libraries = $(target).go $(libs.g) - flags = $(CXXFLAGS.g) -else - libraries = $(target).go $(libs.o) - flags = $(CXXFLAGS.o) -endif - - -# If in multithread mode, add the ACE library to the libraries which -# we need to link with: -ifneq ($(with-multithreading),no) - libraries += $(lib-ACE) -endif - - - -# Now comes the first production rule: how to link the single object -# file produced from the single C++ file into the executable. Since -# this is the first rule in the Makefile, it is the one `make' selects -# if you call it without arguments. -$(target) : $(libraries) - @echo ============================ Linking $@ - @$(CXX) $(flags) -o $@ $^ - - -# To make running the application somewhat independent of the actual -# program name, we usually declare a rule `run' which simply runs the -# program. You can then run it by typing `make run'. 
This is also -# useful if you want to call the executable with arguments which do -# not change frequently. You may then want to add them to the -# following rule: -run: $(target) - @echo ============================ Running $< - @./$(target) - - -# As a last rule to the `make' program, we define what to do when -# cleaning up a directory. This usually involves deleting object files -# and other automatically created files such as the executable itself, -# backup files, and data files. Since the latter are not usually quite -# diverse, you needed to declare them at the top of this file. -clean: - -rm -f *.o *.go *~ Makefile.dep $(target) $(clean-up-files) - - -# Since we have not yet stated how to make an object file from a C++ -# file, we should do so now. Since the many flags passed to the -# compiler are usually not of much interest, we suppress the actual -# command line using the `at' sign in the first column of the rules -# and write the string indicating what we do instead. -%.go : %.cc - @echo ==============debug========= $( Makefile.dep - -# To make the dependencies known to `make', we finally have to include -# them: -include Makefile.dep - - diff --git a/deal.II/deal.II/Attic/examples/step-by-step/step-8/step-8.cc b/deal.II/deal.II/Attic/examples/step-by-step/step-8/step-8.cc deleted file mode 100644 index 51f6741f93..0000000000 --- a/deal.II/deal.II/Attic/examples/step-by-step/step-8/step-8.cc +++ /dev/null @@ -1,1077 +0,0 @@ -/* $Id$ */ -/* Author: Wolfgang Bangerth, University of Heidelberg, 2000 */ - - // As usual, the first few include - // files are already known, so we - // will not comment on them further. -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - // In this example, we need - // vector-valued finite elements. The - // support for these can be found in - // the following include file: -#include - // We will compose the vector-valued - // finite elements from regular Q1 - // elements which can be found here, - // as usual: -#include - - // This again is C++: -#include - - - // The main class is, except for its - // name, almost unchanged with - // respect to the step-6 example. The - // only change is the use of a - // different class for the ``fe'' - // variable. -template -class ElasticProblem -{ - public: - ElasticProblem (); - ~ElasticProblem (); - void run (); - - private: - void setup_system (); - void assemble_system (); - void solve (); - void refine_grid (); - void output_results (const unsigned int cycle) const; - - Triangulation triangulation; - DoFHandler dof_handler; - - // Instead of a concrete finite - // element class such as - // ``FEQ1'', we now use a more - // generic one, ``FESystem''. In - // fact, it is not a finite - // element itself, but rather a - // class that can be used to - // stack several usual elements - // together to form one - // vector-valued finite - // element. In our case, we will - // compose the vector-valued - // element of ``FEQ1'' objects, - // as shown below in the - // constructor of this class. - FESystem fe; - - ConstraintMatrix hanging_node_constraints; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - Vector solution; - Vector system_rhs; -}; - - - // Before going over to the - // implementation of the main class, - // we declare and define the class - // which describes the right hand - // side. 
This time, the right hand - // side is vector-valued, as is the - // solution, so we will describe the - // new elements in some more detail. -template -class RightHandSide : public Function -{ - public: - // The first thing is that - // vector-valued functions have a - // constructor, since they need - // to pass down to the base class - // of how many components the - // function consists. The default - // value in the constructor of - // the base class is one, so we - // need not define a constructor - // for the usual scalar function. - RightHandSide (); - - // The next function is a - // replacement for the ``value'' - // function of the previous - // examples. There, a second - // parameter ``component'' was - // given, which denoted which - // component was requested. Here, - // we implement a function that - // returns the whole vector of - // values at the given place at - // once. - virtual void vector_value (const Point &p, - Vector &values) const; - - // Then, in analogy to the - // ``value_list'' function, there - // is a function - // ``vector_value_list'', which - // returns the values of the - // vector-valued function at - // several points at once: - virtual void vector_value_list (const vector > &points, - vector > &value_list) const; -}; - - - // This is the constructor of the - // right hand side class. As said - // above, it only passes down to the - // base class the number of - // components, which is ``dim'' in - // the present case. Note that - // although the implementation is - // very short here, we do not move it - // into the class declaration, since - // our style guides require that - // inside the class declaration only - // declarations have to happen and - // that definitions are always to be - // found outside. -template -RightHandSide::RightHandSide () : - Function (dim) -{}; - - - // This is the function that returns - // the whole vector of values at the - // point ``p'' at once: -template -inline -void RightHandSide::vector_value (const Point &p, - Vector &values) const -{ - // To prevent cases where the - // return value has not previously - // been set to the right size - // (which is kind of a convention - // in the deal.II library), we test - // for this case and otherwise - // throw an exception: - Assert (values.size() == dim, - ExcVectorHasWrongSize (values.size(), dim)); - // Likewise, if by some accident - // someone tried to compile and run - // the program in only one space - // dimension (in which the elastic - // equations do not make much sense - // since they reduce to the - // ordinary Laplace equation), we - // terminate the program if the - // dimension is not as expected. - Assert (dim >= 2, ExcInternalError()); - - // The rest of the function is as - // would probably be expected given - // the form of the right hand side - // function. First we define the - // centers of the two points around - // which are the sources of - // x-displacement, i.e. (0.5,0) and - // (-0.5,0). Note that upon - // construction of the ``Point'' - // objects, all components are set - // to zero. 
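Written out as formulas, and consistent with the code and comments that follow, the right hand side implemented below is

    f_x(p) = 1 if |p - (0.5, 0)| < 0.2 or |p - (-0.5, 0)| < 0.2, and 0 otherwise,
    f_y(p) = 1 if |p - (0, 0)| < 0.2, and 0 otherwise,

i.e. a unit force in x-direction near the two off-center points and a unit force in y-direction near the origin.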
- Point point_1, point_2; - point_1(0) = 0.5; - point_2(0) = -0.5; - - // If now the point ``p'' is in the - // circle of radius 0.2 around one - // of these points, then set the - // force in x-direction to one, - // otherwise to zero: - if (((p-point_1).square() < 0.2*0.2) || - ((p-point_2).square() < 0.2*0.2)) - values(0) = 1; - else - values(0) = 0; - - // Likewise, if ``p'' is in the - // vicinity of the origin, then set - // the y-force to 1, otherwise to - // zero: - if (p.square() < 0.2*0.2) - values(1) = 1; - else - values(1) = 0; -}; - - - - // Now, this is the function of the - // right hand side class that returns - // the values at several points at - // once. -template -void RightHandSide::vector_value_list (const vector > &points, - vector > &value_list) const -{ - // First we define an abbreviation - // for the number of points which - // we shall work on: - const unsigned int n_points = points.size(); - - // Then we check whether the number - // of output slots has been set - // correctly, i.e. to the number of - // input points: - Assert (value_list.size() == n_points, - ExcVectorHasWrongSize (value_list.size(), n_points)); - - // Finally we treat each of the - // points. In one of the previous - // examples, we have explained why - // the - // ``value_list''/``vector_value_list'' - // function had been introduced: to - // prevent us from calling virtual - // functions too frequently. On the - // other hand, we now need to - // implement the same function - // twice, which can lead to - // confusion if one function is - // changed but the other is - // not. However, we can prevent - // this situation using the - // following construct: - for (unsigned int p=0; p::vector_value (points[p], - value_list[p]); - // It calls the ``vector_value'' - // function defined above for each - // point, and thus preempts all - // chances for inconsistency. It is - // important to note how the - // function was called: using the - // full class qualification using - // ``RightHandSide::'', since this - // calls the function directly and - // not using the virtual function - // table. The call is thus as fast - // as a call to any non-virtual - // function. In addition, we have - // declared the ``vector_value'' - // function ``inline'', i.e. the - // compiler can remove the function - // call altogether and the - // resulting code can in principle - // be as fast as if we had - // duplicated the code. -}; - - - - -template -ElasticProblem::ElasticProblem () : - dof_handler (triangulation), - // As said before, we - // would like to - // construct one - // vector-valued - // finite element as - // outer product of - // several scala - // finite - // elements. Of - // course, the number - // of scalar finite - // element we would - // like to stack - // together equals - // the number of - // components the - // solution function - // has, which is - // ``dim'' since we - // consider - // displacement in - // each space - // direction. The - // ``FESystem'' class - // can handle this: - // we pass it the - // finite element of - // which we would - // like to compose - // the system of, and - // how often it shall - // be repeated: - fe (FEQ1(), dim) - // In fact, the ``FESystem'' class - // has several more constructors - // which can perform more complex - // operations that just stacking - // together several scalar finite - // elements of the same type into - // one; we will get to know these - // possibilities in later examples. 
- // - // It should be noted that the - // ``FESystem'' object thus created - // does not actually use the finite - // element which we have passed to it - // as first parameter. We could thus - // use an anonymous object created - // in-place. The ``FESystem'' - // constructor only needs the - // parameter to deduce the type of - // the finite element from this and - // then creates objects of the - // underlying finite element type - // itself. -{}; - - - -template -ElasticProblem::~ElasticProblem () -{ - dof_handler.clear (); -}; - - - // Setting up the system of equations - // is equal to the function used in - // the step-6 example. The - // ``DoFHandler'' class and all other - // classes used take care of the - // vector-valuedness of the finite - // element themselves (in fact, the - // do not do so, since they only take - // care how many degrees of freedom - // there are per vertex, line and - // cell, and they do not askwhat they - // represent, i.e. whether the finite - // element under consideration is - // vector-valued or whether it is, - // for example, a scalar Hermite - // element with several degrees of - // freedom on each vertex). -template -void ElasticProblem::setup_system () -{ - dof_handler.distribute_dofs (fe); - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - // When making the sparsity - // pattern, there is some potential - // for optimization if not all - // components couple to all - // others. However, this is not the - // case for the elastic equations, - // so we use the standard call: - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - - hanging_node_constraints.condense (sparsity_pattern); - - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -}; - - - // The big changes in this program - // are in the creation of matrix and - // right hand side, since they are - // problem-dependent. We will go - // through that process step-by-step, - // since it is a bit more complicated - // than in previous examples. -template -void ElasticProblem::assemble_system () -{ - // First thing: the quadrature - // formula does not need - // modification since we still deal - // with bilinear functions. - QGauss2 quadrature_formula; - // Also, the ``FEValues'' objects - // takes care of everything for us - // (or better: it does not really - // so; as in the comment in the - // function setting up the system, - // here as well the ``FEValues'' - // object computes the same data on - // each cell, but it has some - // functionality to access data - // stored inside the finite element - // where they are precomputed upon - // construction). - FEValues fe_values (fe, quadrature_formula, - UpdateFlags(update_values | - update_gradients | - update_q_points | - update_JxW_values)); - - // The number of degrees of freedom - // per cell we now obviously ask - // from the composed finite element - // rather than from the underlying - // scalar Q1 element. Here, it is - // ``dim'' times the number of - // degrees of freedom per cell of - // the Q1 element, but this is not - // something we need to care about. 
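For orientation before the assembly code, here is a sketch of the entries that the elasticity bilinear form leads to on each cell, written with a hypothetical helper ``comp(i)'' that returns the vector component to which local shape function ``i'' belongs (the loop further below obtains the same information from the pair described in its comments); this illustrates the pattern and is not a verbatim copy of the original loop:

    // Local stiffness contribution of the linear elasticity operator:
    // the lambda (div u)(div v) term plus the two mu-terms, summed over
    // quadrature points.
    for (unsigned int i=0; i<dofs_per_cell; ++i)
      for (unsigned int j=0; j<dofs_per_cell; ++j)
        for (unsigned int q=0; q<n_q_points; ++q)
          cell_matrix(i,j)
            += (lambda_values[q] *
                  shape_grads[i][q][comp(i)] * shape_grads[j][q][comp(j)]
                +
                mu_values[q] *
                  shape_grads[i][q][comp(j)] * shape_grads[j][q][comp(i)]
                +
                ((comp(i) == comp(j)) ?
                 mu_values[q] * (shape_grads[i][q] * shape_grads[j][q]) :
                 0))
               * JxW_values[q];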
- const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.n_quadrature_points; - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - vector local_dof_indices (dofs_per_cell); - - // As was shown in previous - // examples as well, we need a - // place where to store the values - // of the coefficients at all the - // quadrature points on a cell. In - // the present situation, we have - // two coefficients, lambda and mu. - vector lambda_values (n_q_points); - vector mu_values (n_q_points); - - // Well, we could as well have - // omitted the above two arrays - // since we will use constant - // coefficients for both lambda and - // mu, which can be declared like - // this. They both represent - // functions always returning the - // constant value 1.0. Although we - // could omit the respective - // factors in the assemblage of the - // matrix, we use them here for - // purpose of demonstration. - ConstantFunction lambda(1.), mu(1.); - - // Then again, we need to have the - // same for the right hand - // side. This is exactly as before - // in previous examples. However, - // we now have a vector-valued - // right hand side, which is why - // the data type of the - // ``rhs_values'' array is - // changed. We initialize it by - // ``n_q_points'' elements, each of - // which is a ``Vector'' - // with ``dim'' elements. - RightHandSide right_hand_side; - vector > rhs_values (n_q_points, - Vector(dim)); - - - // Now we can begin with the loop - // over all cells: - DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_matrix.clear (); - cell_rhs.clear (); - - fe_values.reinit (cell); - - // As in previous examples, we - // define some abbreviations - // for the various data that - // the ``FEValues'' class - // offers: - const FullMatrix - & shape_values = fe_values.get_shape_values(); - const vector > > - & shape_grads = fe_values.get_shape_grads(); - const vector - & JxW_values = fe_values.get_JxW_values(); - const vector > - & q_points = fe_values.get_quadrature_points(); - - // Next we get the values of - // the coefficients at the - // quadrature points: - lambda.value_list (q_points, lambda_values); - mu.value_list (q_points, mu_values); - - // Then assemble the entries of - // the local stiffness matrix - // and right hand side - // vector. This follows almost - // one-to-one the pattern - // described in the - // introduction of this example - // and will not comment much on - // this. - for (unsigned int i=0; i'', of - // which the first element - // is ``comp(i)'' and the - // second is the value - // ``base(i)'' also noted - // in the text. You will - // rather seldom need to - // access this second - // value, but the first is - // important when using - // vector valued elements. - - for (unsigned int j=0; jget_dof_indices (local_dof_indices); - for (unsigned int i=0; i boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(dim), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -}; - - - - // The solver does not care about - // where the system of equations - // comes, as long as it stays - // positive definite and symmetric - // (which are the requirements for - // the use of the CG solver), which - // the system is. Therefore, we need - // not change anything. 
-template -void ElasticProblem::solve () -{ - SolverControl solver_control (1000, 1e-12); - PrimitiveVectorMemory<> vector_memory; - SolverCG<> cg (solver_control, vector_memory); - - PreconditionRelaxation<> - preconditioner(system_matrix, - &SparseMatrix::template precondition_SSOR, - 1.2); - - cg.solve (system_matrix, solution, system_rhs, - preconditioner); - - hanging_node_constraints.distribute (solution); -}; - - - - // The function that does the - // refinement of the grid is the same - // as in the step-6 example. The - // quadrature formula is adapted to - // the linear elements again. Note - // that the error estimator by - // default adds up the estimated - // obtained from all components of - // the finite element solution, that - // is it uses the displacement in all - // directions with the same - // weight. If we would like the grid - // to be adapted to the - // x-displacement only, we could pass - // the function an additional - // parameter which tells it to do so - // and do not consider the - // displacements in all other - // directions for the error - // indicators. -template -void ElasticProblem::refine_grid () -{ - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - KellyErrorEstimator::FunctionMap neumann_boundary; - KellyErrorEstimator::estimate (dof_handler, - QGauss2(), - neumann_boundary, - solution, - estimated_error_per_cell); - - triangulation.refine_and_coarsen_fixed_number (estimated_error_per_cell, - 0.3, 0.03); - - triangulation.execute_coarsening_and_refinement (); -}; - - - // The output happens mostly as has - // been shown in previous examples - // already. The only difference is - // not that the solution function is - // vector values. The ``DataOut'' - // class takes care of this - // automatically, but we have to give - // each component of the solution - // vector a different name. -template -void ElasticProblem::output_results (const unsigned int cycle) const -{ - string filename = "solution-"; - filename += ('0' + cycle); - Assert (cycle < 10, ExcInternalError()); - - filename += ".gmv"; - ofstream output (filename.c_str()); - - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - - - - // As said above, we need a - // different name for each - // component of the solution - // function. To pass one name for - // each component, a vector of - // strings is used. Since the - // number of components is the same - // as the number of dimensions we - // are working in, the following - // ``switch'' statement is used. - // - // We note that some graphics - // programs have restriction as to - // what characters are allowed in - // the names of variables. The - // library therefore supports only - // the minimal subset of these - // characters that is supported by - // all programs. Basically, these - // are letters, numbers, - // underscores, and some other - // characters, but in particular no - // whitespace and minus/hyphen. The - // library will throw an exception - // otherwise, at least if in debug - // mode. - vector solution_names; - switch (dim) - { - case 1: - solution_names.push_back ("displacement"); - break; - case 2: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - break; - case 3: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - solution_names.push_back ("z_displacement"); - break; - // It is good style to - // let the program die if - // we run upon a case - // which we did not - // consider. 
Remember - // that the ``Assert'' - // macro throws an - // exception if the - // condition in the first - // parameter is not - // satisfied. Of course, - // the condition - // ``false'' can never be - // satisfied, so the - // program will always - // abort whenever it gets - // to this statement: - default: - Assert (false, ExcInternalError()); - }; - - // After setting up the names for - // the different components of the - // solution vector, we can add the - // solution vector to the list of - // data vectors scheduled for - // output. Note that the following - // function takes a vector of - // strings as second argument, - // whereas the one which we have - // used in all previous examples - // accepted a string there. In - // fact, the latter function is - // only a shortcut for the function - // which we call here: it puts the - // single string that is passed to - // it into a vector of strings with - // only one element and forwards - // that to the other function. - data_out.add_data_vector (solution, solution_names); - data_out.build_patches (); - data_out.write_gmv (output); -}; - - - -template -void ElasticProblem::run () -{ - for (unsigned int cycle=0; cycle<8; ++cycle) - { - cout << "Cycle " << cycle << ':' << endl; - - if (cycle == 0) - { - // As in previous examples, - // we use the unit square - // (or cube) as domain. - GridGenerator::hyper_cube (triangulation, -1, 1); - // This time, we have to - // refine the coarse grid - // twice before we first - // solve on it. The reason - // is the following: we use - // the ``Gauss2'' - // quadrature formula for - // integration of the right - // hand side; that means - // that there are four - // quadrature points on - // each cell (in 2D). If we - // only refine the initial - // grid once globally, then - // there will be only four - // quadrature points in - // each direction on the - // domain. However, the - // right hand side function - // was chosen to be rather - // localized and in that - // case all quadrature - // points lie outside the - // support of the right - // hand side function. The - // right hand side vector - // will then contain only - // zeroes and the solution - // of the system of - // equations is the zero - // vector, i.e. a finite - // element function that it - // zero everywhere. We - // should not be surprised - // about such things - // happening, since we have - // chosen an initial grid - // that is totally - // unsuitable for the - // problem at hand. - // - // The unfortunate thing is - // that if the discrete - // solution is constant, - // then the error - // indicators computed by - // the - // ``KellyErrorEstimator'' - // class are zero for each - // cell as well, and the - // call to - // ``refine_and_coarsen_fixed_number'' - // of the ``triangulation'' - // object will not flag any - // cells for refinement - // (why should it if the - // indicated error is zero - // for each cell?). The - // grid in the next - // iteration will therefore - // consist of four cells - // only as well, and the - // same problem occurs - // again. - // - // The conclusion needs to - // be: while of course we - // will not choose the - // initial grid to be - // well-suited for the - // accurate solution of the - // problem, we must at - // least choose it such - // that it has the chance - // to capture the most - // striking features of the - // solution. In this case, - // it needs to be able to - // see the right hand - // side. Thus, we refine - // twice globally. 
- triangulation.refine_global (2); - } - else - refine_grid (); - - cout << " Number of active cells: " - << triangulation.n_active_cells() - << endl; - - setup_system (); - - cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << endl; - - assemble_system (); - solve (); - output_results (cycle); - }; -}; - - - // The main function is again exactly - // like in step-6 (apart from the - // changed class names, of course). -int main () -{ - try - { - deallog.depth_console (0); - - ElasticProblem<2> elastic_problem_2d; - elastic_problem_2d.run (); - } - catch (exception &exc) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Exception on processing: " << endl - << exc.what() << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - - return 1; - } - catch (...) - { - cerr << endl << endl - << "----------------------------------------------------" - << endl; - cerr << "Unknown exception!" << endl - << "Aborting!" << endl - << "----------------------------------------------------" - << endl; - return 1; - }; - - return 0; -}; diff --git a/deal.II/doc/news/c-5.0.html b/deal.II/doc/news/c-5.0.html new file mode 100644 index 0000000000..c49fb65be7 --- /dev/null +++ b/deal.II/doc/news/c-5.0.html @@ -0,0 +1,540 @@ + + + + + The deal.II news page + + + + + + +

Changes after Version 5.0

+ +

+This is the list of changes made after the release of +deal.II version 5.0. It is subdivided into changes +made to the three sub-libraries base, +lac, and deal.II, as well as +changes to the general infrastructure, +documentation, etc. +

+ +

+All entries are signed with the name of the author. Regular +contributors' names are abbreviated by WB (Wolfgang Bangerth), GK +(Guido Kanschat), RH (Ralf Hartmann). +

+ + + +

Incompatibilities

+ +

+Following are a few modifications to the library that unfortunately +are incompatible with previous versions of the library, but which we +deem necessary for the future maintainability of the +library. Unfortunately, some of these changes will require +modifications to application programs. We apologize for the +inconvenience this causes. +

+ + +
    + +
  1. Changed: The call to MGTransferBlock::build_matrices and to the same function in + derived classes receives a DoFHandler as + an additional argument. It is needed to prebuild the transfer + between vectors and multigrid vectors, since the loop over all cells + is too slow. +
    + (GK 2004/08/27) +

    + +
  2. + Changed: Previously, parallel PETSc matrices only took the sizes of the + matrix and the number of rows to be stored locally as arguments. They + chose the partitioning of columns to be the same as that for the + rows. However, this does not work for non-quadratic parallel matrices, + and leads to errors that are very hard to find. Therefore, all constructors and + reinit functions of the parallel sparse matrix classes now take an + additional argument indicating the partitioning of columns of the matrix. +
    + (WB 2004/06/02) +
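     + As an illustration only (not part of the original entry): a hedged sketch of how a
     + parallel matrix with an explicit column partitioning might now be set up. The header
     + path, the presence of the communicator argument, and the exact argument order are
     + assumptions; see the class documentation for the actual signature.
     + <pre>
     +   #include <lac/petsc_parallel_sparse_matrix.h>   // header path assumed
     +
     +   void setup_matrix (PETScWrappers::MPI::SparseMatrix &matrix,
     +                      const MPI_Comm                    communicator)
     +   {
     +     const unsigned int n_rows = 1000, n_columns = 500;        // global sizes
     +     const unsigned int local_rows = 100, local_columns = 50;  // this process' share
     +
     +     // assumed argument order: communicator, global sizes, local sizes,
     +     // maximal number of nonzero entries per row
     +     matrix.reinit (communicator,
     +                    n_rows, n_columns,
     +                    local_rows, local_columns,
     +                    5);
     +   }
     + </pre>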

    +
+ + + + +

General

+ +
    +
  1. + Fixed: Configuration scripts were changed to accept any file suffix for + shared and static libraries, so that DLLs can be created + under Cygwin in the future. +
    + (Ralf B. Schulz, 2004/12/17) +

    + +
  2. + New: Configuration now detects the Intel Fortran compiler and can set + compilation flags accordingly. +
    + (WB 2004/11/04) +

    + +
  3. + Extended: deal.II 5.0.0 didn't work with the PETSc release 2.2.1 (which + came out after we released version 5.0.0). This should now be fixed: + deal.II can be linked against both PETSc 2.2.0 and 2.2.1. +
    + (WB 2004/10/07) +

    + +
  4. + Improved: The documentation generated by Doxygen now states the header + file defining a class. Furthermore, all deal.II + exceptions are listed as classes and will soon be found in the module + Exceptions. +
    + (GK 2004/09/16) +

    + +
  5. + New: configuration option --with-umfpack for using the + UMFPACK library and enabling the class SparseDirectUMFPACK, both included by + Wolfgang. +
    + (GK 2004/08/25) +

    + +
  6. + Fixed: None of the formulas in the step-8 tutorial program web + page were visible. This is now fixed. +
    + (WB 2004/06/29) +

    + +
+ + + +

base

+ +
    + +
  1. +
  2. + New: The MultithreadInfo class now also detects multiple + processors on Mac OS X. +
    + (Helmut Müller 2004/11/29) +

    + +
  3. + New: The function + TableHandler::write_tex now accepts the additional boolean + argument with_header which is set to + true by default and tells the function whether to add the LaTeX + header and footer (i.e. the \documentclass{...}, + \begin{document} and \end{document} parts) to the table.
     + In addition to this, there are two new members in the above class: + TableHandler::tex_set_table_caption and + TableHandler::tex_set_table_label to + add a caption and a label to the generated TeX table. +
    + (Luca Heltai 2004/10/29) +
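     + As an illustration (not from the original entry; the header path and the exact
     + placement of the with_header flag are assumptions), the new flag and members
     + might be used like this:
     + <pre>
     +   #include <base/table_handler.h>   // header path assumed
     +   #include <fstream>
     +
     +   void write_convergence_table ()
     +   {
     +     TableHandler table;
     +     table.add_value ("cells", 64);
     +     table.add_value ("error", 0.125);
     +
     +     // caption and label for the generated TeX table
     +     table.tex_set_table_caption ("Convergence history");
     +     table.tex_set_table_label ("tab:convergence");
     +
     +     std::ofstream out ("table.tex");
     +     // second argument assumed to be the new with_header flag; false
     +     // suppresses \documentclass, \begin{document} and \end{document}
     +     table.write_tex (out, false);
     +   }
     + </pre>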

    + +
  4. + Fixed: DataOutBase<2,3>::write_tecplot sometimes did not + write the variable name z when only outputting faces of + cells. This is now fixed. +
    + (RH 2004/10/29) +

    + +
  5. + New: a class template VectorSlice + allows access to consecutive portions of a vector. +
    + (GK 2004/09/16) +
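     + A minimal sketch of the intended use (the (start, length) meaning of the
     + constructor arguments and the header path are assumptions):
     + <pre>
     +   #include <base/vector_slice.h>   // header path assumed
     +   #include <vector>
     +
     +   void use_slice ()
     +   {
     +     std::vector<double> v (10, 1.);
     +
     +     // view of the four consecutive elements v[3]..v[6]
     +     VectorSlice<std::vector<double> > slice (v, 3, 4);
     +
     +     slice[0] = 2.;        // modifies v[3]
     +     // slice.size () returns 4
     +   }
     + </pre>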

    + +
  6. New: The classes TableIndices<N> and Table<N,T> are now implemented also + for N=7. The Table<N,T> class represents an + N-dimensional array and might replace the + N-times-nested-use of the std::vector class. +
    + (RH 2004/08/13) +

    + +
  7. + New: Class TableIndices now has + operators that check for equality and inequality of objects. +
    + (WB 2004/07/28) +
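     + A small sketch (the TableIndices<2> constructor form and the header path are
     + assumptions):
     + <pre>
     +   #include <base/table_indices.h>   // header path assumed
     +
     +   bool indices_equal ()
     +   {
     +     const TableIndices<2> a (1, 2);
     +     const TableIndices<2> b (1, 2);
     +
     +     // the new comparison operators
     +     return (a == b) && !(a != b);   // true here
     +   }
     + </pre>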

    + +
  8. + New: A class PointerComparison for comparing + pointers that may or may not be of the same type. +
    + (WB 2004/06/22) +

    + +
  9. + Removed: The ParameterHandler class contained a remnant from + back in 1997 when it was modeled after a similar class in DiffPack: it + had a status flag that one could obtain via the ok function. It was never really used for + anything, and has thus finally been removed. The ok function is consequently gone as well. +
    + (WB 2004/06/06) +

    + +
  10. + New: An object of the new ConditionalOStream class allows printing + to an output stream depending on whether a condition is active + or not. This is particularly useful for parallel computations + when only one process should print to standard output, while in all + other processes we simply want output suppressed. +
    + (RH 2004/05/26) +
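     + A minimal sketch of the intended use (the constructor signature and header path
     + shown here are assumptions):
     + <pre>
     +   #include <base/conditional_ostream.h>   // header path assumed
     +   #include <iostream>
     +
     +   void report (const bool this_is_process_zero)
     +   {
     +     // forwards to std::cout only if the condition is true,
     +     // otherwise output is swallowed
     +     ConditionalOStream pcout (std::cout, this_is_process_zero);
     +
     +     pcout << "Only one process prints this line." << std::endl;
     +   }
     + </pre>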

    +
+ + + + +

lac

+ +
    +
  1. + Fixed: PETSc had changed some of its interfaces in version 2.2.1, which + prevented deal.II from working with it (it couldn't be compiled at + all). We have added checks that make sure that it can now again be + compiled with PETSc versions 2.1.6, 2.2.0, and 2.2.1. +
    + (WB 2004/10/07) +

    + +
  2. + New: A class PreconditionLU was added which + provides a wrapper to the complete LU decomposition + preconditioner of PETSc. Furthermore, a class + SolverPreOnly was implemented. It + is a wrapper for the PETSc solver type KSPPREONLY, which + only applies the preconditioner. In conjunction with + PreconditionLU this provides a + simple direct solver, which can be used for small to medium + sized problems on a single-processor machine. +
    + (Oliver Kayser-Herold 2004/07/27) +

    + +
  3. + Improved: VectorTools::point_difference + used an algorithm to find the cell containing a given point that + was linear in the total number of active cells. It has been rewritten + to use + GridTools::find_active_cell_around_point, + whose complexity is only logarithmic. +
    + (WB 2004/07/07) +
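     + For illustration (header paths, the return type, and the exact overload are
     + assumptions), the new, logarithmic-complexity search might be called like this:
     + <pre>
     +   #include <grid/grid_tools.h>    // header paths assumed
     +   #include <dofs/dof_handler.h>
     +   #include <base/point.h>
     +
     +   template <int dim>
     +   void locate (const DoFHandler<dim> &dof_handler,
     +                const Point<dim>      &p)
     +   {
     +     // find the active cell containing p
     +     typename DoFHandler<dim>::active_cell_iterator
     +       cell = GridTools::find_active_cell_around_point (dof_handler, p);
     +
     +     // ... evaluate the solution on this cell, etc.
     +   }
     + </pre>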

    + +
  4. + Fixed: Block matrix iterators could get into all kinds of interesting + (and invalid) states when some of the blocks had empty rows. In this + common case, we would frequently skip elements when looping over the + elements of a block matrix. These cases should now be fixed. +
    + (WB 2004/07/07) +

    + +
  5. + Removed: Block matrix iterators used to have a function + it->index() that returned something like + the position within a row. However, this was fragile, and has been + removed. If you want an ordering of elements within a row, use the + operator < to compare iterators. +
    + (WB 2004/07/07) +

    + +
  6. + Improved: The PETScWrappers::SolverGMRES::AdditionalData + class now takes an additional flag indicating the use of left + or right preconditioning. +
    + (RH 2004/06/24) +

    + +
  7. + Improved: The SparseDirectMA27 class can now + handle float as well as double input matrices and vectors. +
    + (WB 2004/06/23) +

    + +
  8. + Fixed: The SparseMatrix::vmult-type functions + vmult, Tvmult, vmult_add, and + Tvmult_add take two different template arguments for input + and output vectors, but were only instantiated in case the arguments + were the same, and could also not be compiled if they weren't. Both + problems are now fixed. +
    + (WB 2004/06/22) +

    + +
  9. + Improved: The CompressedSparsityPattern + class used one of the C++ standard containers to store the column + indices of nonzero entries in a sparse matrix. This proved to be + inefficient since it requires the allocation of 20 bytes each time an + element was added, which for large matrices can be millions of + times. The new storage format uses a more compact data structure, and a + cache that requires memory allocation only every 8 additions, on + average. This should significantly reduce the total amount of memory + required as well as memory fragmentation. It also cuts run-time for + element addition by more than half. +
    + (WB 2004/06/21) +

    + +
  10. + Fixed: CompressedSparsityPattern::max_entries_per_row() + ignored the first row and thus sometimes returned a value that was too + low. This is now fixed. +
    + (WB 2004/06/21) +

    + +
  11. + Fixed: BlockSparseMatrix::clear() did not + do what it was supposed to do: it emptied the individual blocks and + removed them, but it still kept the number of blocks + unchanged. Subsequent accesses to these blocks, or to information + about them, yielded segmentation faults. +
    + (WB 2004/06/02) +

    + +
  12. + New: Block matrices and vectors have been factored into abstract base + classes and concrete implementation classes. While the previous classes + still exist in all their functionality, this allowed us to now also + have block matrices and vectors for PETSc sequential and parallel + objects. +
    + (WB 2004/06/02) +

    + +
  13. + New: Vector and block vector classes had a commented out template + constructor constructing such a vector from a vector object with a + different template argument, for example constructing a Vector<double> from a Vector<float>. This constructor had been + commented out a long time ago due to a compiler bug in which the + explicit keyword on template constructors was ignored, a + fact that is dangerous since it may lead the compiler to generate + temporaries without us noticing. This bug is now detected at + configuration time of the library, and these constructors are available + whenever the compiler does not have this bug. +
    + (WB 2004/06/02) +

    + +
  14. + New: In analogy to the PETSc vector classes, the PETSc matrix classes + now also have member functions local_range, + in_local_range, and local_size. +
    + (WB 2004/06/02) +
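     + A hedged sketch (return types and header path are assumptions based on the
     + analogy to the vector classes mentioned above):
     + <pre>
     +   #include <lac/petsc_parallel_sparse_matrix.h>   // header path assumed
     +   #include <utility>
     +
     +   void inspect (const PETScWrappers::MPI::SparseMatrix &matrix)
     +   {
     +     // the half-open range of rows stored on this process
     +     const std::pair<unsigned int, unsigned int> range = matrix.local_range ();
     +
     +     // number of locally stored rows, and a per-index query
     +     const unsigned int n_local = matrix.local_size ();
     +     const bool here = matrix.in_local_range (range.first);
     +   }
     + </pre>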

    + +
  15. + New: Parallel PETSc matrix and vector classes now have a member function + get_mpi_communicator that returns the MPI + communicator object these objects operate on. +
    + (WB 2004/06/02) +

    + +
  16. + Changed: The PETSc linear solver classes now take a constant, rather + than a nonconstant reference to the MPI communicator to be used. This + prevents some unnecessary compiler problems in conjunction with the new + get_mpi_communicator function. +
    + (WB 2004/06/02) +

    + +
  17. + New: Parallel and sequential PETSc sparse matrix classes can now be + initialized (via either constructor or reinit functions) with a + compressed sparsity pattern object, allowing for more efficient + preallocation of nonzero entries. +
    + (WB 2004/06/02) +
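     + As an illustration (header paths are assumptions, and whether reinit takes
     + further arguments besides the compressed pattern is not shown here):
     + <pre>
     +   #include <lac/compressed_sparsity_pattern.h>   // header paths assumed
     +   #include <lac/petsc_sparse_matrix.h>
     +   #include <dofs/dof_handler.h>
     +   #include <dofs/dof_tools.h>
     +
     +   template <int dim>
     +   void build (const DoFHandler<dim>       &dof_handler,
     +               PETScWrappers::SparseMatrix &matrix)
     +   {
     +     CompressedSparsityPattern csp (dof_handler.n_dofs (),
     +                                    dof_handler.n_dofs ());
     +     DoFTools::make_sparsity_pattern (dof_handler, csp);
     +
     +     // preallocate nonzero entries directly from the compressed pattern
     +     matrix.reinit (csp);
     +   }
     + </pre>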

    +
+ + + + +

deal.II

+ +
    +
  1. + Fixed: In rare cases of 3d meshes with a certain topology, we triggered + an assertion in + TriaAccessor::neighbor_child_on_subface. It turns out that + the code actually computes the correct answer, but the assertion had a + condition that doesn't always have to be satisfied. This bogus + assertion is now fixed. +
    + (WB 2004/11/12) +

    + +
  2. + Fixed: The GridGenerator::cylinder function in 3d + assigned the wrong boundary value to the top and bottom part of + the cylinder if the half length of the cylinder was not equal + to 1. This is now fixed. +
    + (Ralf Schulz 2004/10/27) +

    + +
  3. + Improved: Now the FE_Q class supports hanging node + constraints for elements of arbitrary polynomial degree also in + 3D. +
    + (Oliver Kayser-Herold 2004/10/21) +

    + +
  4. + Fixed: The StraightBoundary<3>::get_new_point_on_quad + did not work on general grids. This is now fixed. +
    + (RH 2004/10/15) +

    + +
  5. + Improved: The CylinderBoundary + represented the hull of a circular tube along the x-axis. It + has now been extended to allow for circular tubes also along the y- + and z-axes. +
    + (RH 2004/10/15) +

    + +
  6. + Fixed: The ConstraintMatrix class had some + algorithms that were linear in the number of constraints. Since these + functions had to be called for each constraint, this resulted in a + quadratic behavior. To make things worse, these algorithms traversed + large memory blocks, leading to a vast number of cache misses which made + them really slow. This is now fixed: the algorithm is O(1) and should + only access single elements in memory, and one 3d testcase is now a + full 5 per cent faster on a runtime of about 10 minutes. +
    + (WB 2004/10/04) +

    + +
  7. + New: The GridOut::write_gnuplot + function now supports curved boundaries also for dim==3. +
    + (RH 2004/09/20) +

    + +
  8. + Fixed: The documentation of the Triangulation class mentioned that no + places in the library use or touch the user pointers. That is + wrong: the SolutionTransfer class + actually does. This is now properly documented. +
    + (WB 2004/09/15) +

    + +
  9. + Fixed: The DerivativeApproximation::approximate_second_derivative + function produced wrong results ("not a number", or an exception instead of + "zero") if the field it was given was constant. This is now fixed. +
    + (WB 2004/08/05) +

    + +
  10. + New: The MatrixTools::apply_boundary_values + function that works on block matrices and vectors is now templatized over the + number type, i.e. it also works for float matrices and vectors. +
    + (WB 2004/06/22) +
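     + A sketch of the float variant (header paths are assumptions; the call itself
     + mirrors the usual double-precision usage from the tutorial programs):
     + <pre>
     +   #include <numerics/matrices.h>   // header paths assumed
     +   #include <numerics/vectors.h>
     +   #include <lac/block_sparse_matrix.h>
     +   #include <lac/block_vector.h>
     +   #include <dofs/dof_handler.h>
     +   #include <base/function.h>
     +   #include <map>
     +
     +   template <int dim>
     +   void constrain (const DoFHandler<dim>    &dof_handler,
     +                   BlockSparseMatrix<float> &matrix,
     +                   BlockVector<float>       &solution,
     +                   BlockVector<float>       &rhs)
     +   {
     +     std::map<unsigned int, double> boundary_values;
     +     VectorTools::interpolate_boundary_values (dof_handler, 0,
     +                                               ZeroFunction<dim>(),
     +                                               boundary_values);
     +
     +     // now also instantiated for float block matrices and vectors
     +     MatrixTools::apply_boundary_values (boundary_values,
     +                                         matrix, solution, rhs);
     +   }
     + </pre>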

    + +
  11. + New: The new FEValuesBase::get_cell + function returns the current cell, i.e. the latest cell the + FEValues object was reinited with. +
    + (RH 2004/06/22) +

    + +
  12. + Changed: The MatrixTools::local_apply_boundary_values function + used to set the diagonal entries of boundary nodes to one. This is a + really bad choice, and the algorithm we use now is much better. There + are still a few problems when using this function for matrices that + will be solved as a Schur complement; there is nothing we can do in + that function in these cases, since we are lacking fundamental pieces + of information, but the cases where this applies and the strategies to + work with this problem anyway are now well documented in the + documentation of the MatrixTools class. +
    + (WB 2004/06/07) +

    + +
  13. + Fixed: The GridGenerator::cylinder_shell + function generated cells that were all inside-out, i.e. had negative + Jacobians. In usual finite element computations this simply leads to + all components of the linear system being negated, so it is not + harmful. It is fixed now anyway. +
    + (WB 2004/06/07) +

    +
+ +
+Last update $Date$ + + + diff --git a/deal.II/lac/Attic/doc/Makefile b/deal.II/lac/Attic/doc/Makefile deleted file mode 100644 index 974eeec17f..0000000000 --- a/deal.II/lac/Attic/doc/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -# $Id$ - - -KDOCFLAGS = -I../../../deal.II/doc/kdoc ../../../deal.II/doc/kdoc/kdoc -a -p -kdoc.inc = $(wildcard ../include/lac/*.h) - - - -doc-html: kdoc - - -# make kdoc doc; make sure that the *.kdoc files exist by -# using the dependancies and the following rules. -kdoc: $(kdoc.inc) - cd doc.kdoc ; perl $(KDOCFLAGS) -ulac -dlac \ - lac $(kdoc.inc:..%=../..%) - -cvslog: - @cd .. ; ../deal.II/doc/cvslog/cvs2html -o doc/cvslog/lac #-a -k - -clean: - rm -f doc.kdoc/lac/* \ - cvslog/* \ - *~ - - -.PHONY: doc-html kdoc cvslog clean diff --git a/deal.II/lac/Attic/doc/cvslog/.cvsignore b/deal.II/lac/Attic/doc/cvslog/.cvsignore deleted file mode 100644 index 2d19fc766d..0000000000 --- a/deal.II/lac/Attic/doc/cvslog/.cvsignore +++ /dev/null @@ -1 +0,0 @@ -*.html diff --git a/deal.II/lac/Attic/doc/doc.kdoc/.cvsignore b/deal.II/lac/Attic/doc/doc.kdoc/.cvsignore deleted file mode 100644 index 0a824cbf2d..0000000000 --- a/deal.II/lac/Attic/doc/doc.kdoc/.cvsignore +++ /dev/null @@ -1,2 +0,0 @@ -lac.kdoc -lac diff --git a/deal.II/lac/include/lac/solver_gmres.h b/deal.II/lac/include/lac/solver_gmres.h index 45e2da5cca..31dbf5dfb0 100644 --- a/deal.II/lac/include/lac/solver_gmres.h +++ b/deal.II/lac/include/lac/solver_gmres.h @@ -14,6 +14,7 @@ #define __deal2__solver_gmres_h +/*---------------------------- solver_pgmres.h ---------------------------*/ #include #include @@ -21,97 +22,11 @@ #include #include #include -#include #include #include -/*!@addtogroup Solvers */ -/*@{*/ -namespace internal -{ - /** - * A namespace for a helper class - * to the GMRES solver. - */ - namespace SolverGMRES - { - /** - * Class to hold temporary - * vectors. This class - * automatically allocates a new - * vector, once it is needed. - * - * A future version should also - * be able to shift through - * vectors automatically, - * avoiding restart. - */ - - template - class TmpVectors - { - public: - /** - * Constructor. Prepares an - * array of @p VECTOR of - * length @p max_size. - */ - TmpVectors(const unsigned int max_size, - VectorMemory &vmem); - - /** - * Delete all allocated vectors. - */ - ~TmpVectors(); - - /** - * Get vector number - * @p i. If this vector was - * unused before, an error - * occurs. - */ - VECTOR& operator[] (const unsigned int i) const; - - /** - * Get vector number - * @p i. Allocate it if - * necessary. - * - * If a vector must be - * allocated, @p temp is - * used to reinit it to the - * proper dimensions. - */ - VECTOR& operator() (const unsigned int i, - const VECTOR &temp); - - private: - /** - * Pool were vectors are - * obtained from. - */ - VectorMemory &mem; - - /** - * Field for storing the - * vectors. - */ - std::vector data; - - /** - * Offset of the first - * vector. This is for later - * when vector rotation will - * be implemented. - */ - unsigned int offset; - }; - } -} - -//! Generalized minimal residual method. /** * Implementation of the Restarted Preconditioned Direct Generalized * Minimal Residual Method. The stopping criterion is the norm of the @@ -122,7 +37,7 @@ namespace internal * the number of iterations needed to solve the problem to the given * criterion, an intermediate solution is computed and a restart is * performed. 
If you don't want to use the restarted method, you can - * limit the number of iterations (stated in the @p SolverControl + * limit the number of iterations (stated in the @p{SolverControl} * object given to the constructor) to be below the number of * temporary vectors minus three. Note the subtraction, which is due * to the fact that three vectors are used for other purposes, so the @@ -145,33 +60,35 @@ namespace internal * possibilities is to use a good preconditioner. * * Like all other solver classes, this class has a local structure - * called @p AdditionalData which is used to pass additional + * called @p{AdditionalData} which is used to pass additional * parameters to the solver, like the number of temporary vectors for * example. We use this additional structure instead of passing these * values directly to the constructor because this makes the use of - * the @p SolverSelector and other classes much easier and guarantees + * the @p{SolverSelector} and other classes much easier and guarantees * that these will continue to work even if number or type of the * additional parameters for a certain solver changes. * - * For the GMRes method, the @p AdditionalData structure contains the number - * of temporary vectors as commented upon above. By default, the number of - * these vectors is set to 30. The @p AdditionalData also containes a flag - * indicating the use of right or left preconditioning. The default is left - * preconditioning. Finally it includes a flag indicating whether or not the - * default residual is used as stopping criterion. By default, the left - * preconditioned GMRes uses the preconditioned residual and the right - * preconditioned GMRes uses the normal, i.e. unpreconditioned, residual as - * stopping criterion. If the @p use_default_residual flag is @p false, the - * left preconditioned GMRes uses as stopping criterion the unpreconditioned - * residual and the right preconditioned GMRes the preconditioned - * residual. But be aware that the non-default residuals are not automatically - * computed by the GMRes method but need to be computed in addition. This - * (especially for the left preconditioned GMRes) might lead to a significant - * loss in the solver performance. Therefore, the user should set - * use_default_residual=false only for debugging/testing purposes. + * For the GMRes method, the @p{AdditionalData} structure contains the + * number of temporary vectors as commented upon above. By default, + * the number of these vectors is set to 30. The @p{AdditionalData} + * also containes a flag indicating the use of right or left + * preconditioning. The default is left preconditioning. Finally it + * includes a flag indicating whether or not the default residual is + * used as stopping criterion. By default, the left preconditioned + * GMRes uses the preconditioned residual and the right preconditioned + * GMRes uses the normal, i.e. unpreconditioned, residual as stopping + * criterion. If the @p{use_default_residual} flag is @p{false}, the + * left preconditioned GMRes uses as stopping criterion the + * unpreconditioned residual and the right preconditioned GMRes the + * preconditioned residual. But be aware that the non-default + * residuals are not automatically computed by the GMRes method but + * need to be computed in addition. This (especially for the left + * preconditioned GMRes) might lead to a significant loss in the + * solver performance. Therefore, the user should set + * @p{use_default_residual=false} only for debugging/testing purposes. 
* * For the requirements on matrices and vectors in order to work with - * this class, see the documentation of the Solver base class. + * this class, see the documentation of the @ref{Solver} base class. * * @author Wolfgang Bangerth, Guido Kanschat, Ralf Hartmann. */ @@ -187,18 +104,23 @@ class SolverGMRES : public Solver struct AdditionalData { /** - * Constructor. By default, set the - * number of temporary vectors to 30, - * i.e. do a restart every - * approximately 30 iterations. Also - * set preconditioning from left and - * the residual of the stopping - * criterion to the default residual + * Constructor. By default, + * set the number of + * temporary vectors to 30, + * preconditioning from left + * and the residual of the + * stopping criterion to the + * default residual * (cf. class documentation). */ - AdditionalData (const unsigned int max_n_tmp_vectors = 30, - const bool right_preconditioning = false, - const bool use_default_residual = true); + AdditionalData(const unsigned int max_n_tmp_vectors = 30, + bool right_preconditioning = false, + bool use_default_residual = true) + : + max_n_tmp_vectors(max_n_tmp_vectors), + right_preconditioning(right_preconditioning), + use_default_residual(use_default_residual) + {}; /** * Maximum number of @@ -237,13 +159,16 @@ class SolverGMRES : public Solver VECTOR &x, const VECTOR &b, const PRECONDITIONER &precondition); - + + /** @addtogroup Exceptions + * @{ */ + DeclException1 (ExcTooFewTmpVectors, int, << "The number of temporary vectors you gave (" << arg1 << ") is too small. It should be at least 10 for " << "any results, and much more for reasonable ones."); - + //@} protected: /** * Includes the maximum number of @@ -271,34 +196,86 @@ class SolverGMRES : public Solver */ FullMatrix H; /** - * Auxiliary matrix for inverting @p H + * Auxiliary matrix for inverting @p{H} */ FullMatrix H1; + public: + /** + * Class to hold temporary + * vectors. This class + * automatically allocates a new + * vector, once it is needed. + * + * A future version should also + * be able to shift through + * vectors automatically, + * avoiding restart. + */ + + class TmpVectors + { + public: + /** + * Constructor. Prepares an + * array of @p{VECTOR} of + * length @p{max_size}. + */ + TmpVectors(unsigned int max_size, + VectorMemory& vmem); + /** + * Delete all allocated vectors. + */ + ~TmpVectors(); + + /** + * Get vector number + * @p{i}. If this vector was + * unused before, an error + * occurs. + */ + VECTOR& operator[] (unsigned int i) const; + + /** + * Get vector number + * @p{i}. Allocate it if + * necessary. + * + * If a vector must be + * allocated, @p{temp} is + * used to reinit it to the + * proper dimensions. + */ + VECTOR& operator() (unsigned int i, + const VECTOR& temp); + + private: + /** + * Pool were vectors are obtained from. + */ + VectorMemory& mem; + /** + * Field for storing the vectors. + */ + std::vector data; + /** + * Offset of the first + * vector. This is for later + * when vector rotation will + * be implemented. + */ + unsigned int offset; + }; private: - /** + /** * No copy constructor. */ SolverGMRES (const SolverGMRES&); }; -//! Generalized minimal residual method with flexible preconditioning. + /** * Flexible GMRES. - * - * This version of the GMRES method allows for the use of a different - * preconditioner in each iteration step. Therefore, it is also more - * robust with respect to inaccurate evaluation of the - * preconditioner. 
An important application is also the use of a - * Krylov space method inside the preconditioner. - * - * FGMRES needs two vectors in each iteration steps yielding a total - * of 2*AdditionalData::max_basis_size+1 auxiliary vectors. - * - * Caveat: documentation of this class is not up to date. There are - * also a few parameters of GMRES we would like to introduce here. - * - * @author Guido Kanschat, 2003 */ template > class SolverFGMRES : public Solver @@ -321,17 +298,25 @@ class SolverFGMRES : public Solver * default residual * (cf. class documentation). */ - AdditionalData(const unsigned int max_basis_size = 30, - const bool /*use_default_residual*/ = true) + AdditionalData(const unsigned int max_n_tmp_vectors = 30, + bool use_default_residual = true) : - max_basis_size(max_basis_size) + max_n_tmp_vectors(max_n_tmp_vectors), + use_default_residual(use_default_residual) {}; /** * Maximum number of * tmp vectors. */ - unsigned int max_basis_size; + unsigned int max_n_tmp_vectors; + + /** + * Flag for the default + * residual that is used as + * stopping criterion. + */ + bool use_default_residual; }; /** @@ -362,89 +347,70 @@ class SolverFGMRES : public Solver */ FullMatrix H; /** - * Auxiliary matrix for inverting @p H + * Auxiliary matrix for inverting @p{H} */ FullMatrix H1; }; -/*@}*/ /* --------------------- Inline and template functions ------------------- */ +template +inline +SolverGMRES::TmpVectors::TmpVectors (unsigned int max_size, + VectorMemory& vmem) + : + mem(vmem), + data (max_size, 0), + offset(0) +{} + -namespace internal +template +inline +SolverGMRES::TmpVectors::~TmpVectors () { - namespace SolverGMRES - { - template - inline - TmpVectors:: - TmpVectors (const unsigned int max_size, - VectorMemory &vmem) - : - mem(vmem), - data (max_size, 0), - offset(0) - {} - - - template - inline - TmpVectors::~TmpVectors () - { - for (typename std::vector::iterator v = data.begin(); - v != data.end(); ++v) - if (*v != 0) - mem.free(*v); - } + for (typename std::vector::iterator v = data.begin(); + v != data.end(); ++v) + if (*v != 0) + mem.free(*v); +} - template - inline VECTOR& - TmpVectors::operator[] (const unsigned int i) const - { - Assert (i+offset +inline VECTOR& +SolverGMRES::TmpVectors::operator[] (unsigned int i) const +{ + Assert (i+offset - inline VECTOR& - TmpVectors::operator() (const unsigned int i, - const VECTOR &temp) +template +inline VECTOR& +SolverGMRES::TmpVectors::operator() (unsigned int i, + const VECTOR& temp) +{ + Assert (i+offsetreinit(temp); - } - return *data[i-offset]; + data[i] = mem.alloc(); + data[i]->reinit(temp); } - } + return *data[i]; } - -template -inline -SolverGMRES::AdditionalData:: -AdditionalData (const unsigned int max_n_tmp_vectors, - const bool right_preconditioning, - const bool use_default_residual) - : - max_n_tmp_vectors(max_n_tmp_vectors), - right_preconditioning(right_preconditioning), - use_default_residual(use_default_residual) -{} - template SolverGMRES::SolverGMRES (SolverControl &cn, - VectorMemory &mem, - const AdditionalData &data) : + VectorMemory &mem, + const AdditionalData &data) : Solver (cn,mem), additional_data(data) {} @@ -500,7 +466,7 @@ SolverGMRES::solve (const MATRIX &A, // Generate an object where basis // vectors are stored. 
- internal::SolverGMRES::TmpVectors tmp_vectors (n_tmp_vectors, this->memory); + TmpVectors tmp_vectors (n_tmp_vectors, this->memory); // number of the present iteration; this // number is not reset to zero upon a @@ -513,10 +479,10 @@ SolverGMRES::solve (const MATRIX &A, // some additional vectors, also used // in the orthogonalization - ::Vector gamma(n_tmp_vectors), - ci (n_tmp_vectors-1), - si (n_tmp_vectors-1), - h (n_tmp_vectors-1); + Vector gamma(n_tmp_vectors), + ci (n_tmp_vectors-1), + si (n_tmp_vectors-1), + h (n_tmp_vectors-1); unsigned int dim = 0; @@ -556,7 +522,7 @@ SolverGMRES::solve (const MATRIX &A, gamma_=new ::Vector (gamma.size()); } - /////////////////////////////////// +/////////////////////////////////// // outer iteration: loop until we // either reach convergence or the // maximum number of iterations is @@ -626,14 +592,14 @@ SolverGMRES::solve (const MATRIX &A, gamma(0) = rho; - v *= 1./rho; + v.scale (1./rho); // inner iteration doing at // most as many steps as there // are temporary vectors. the // number of steps actually // been done is propagated - // outside through the @p dim + // outside through the @p{dim} // variable for (unsigned int inner_iteration=0; ((inner_iteration < n_tmp_vectors-2) @@ -677,7 +643,7 @@ SolverGMRES::solve (const MATRIX &A, s = vv.l2_norm(); h(inner_iteration+1) = s; - vv *= 1./s; + vv.scale(1./s); /* Transformation into triagonal structure */ @@ -825,12 +791,12 @@ SolverFGMRES::solve ( SolverControl::State iteration_state = SolverControl::iterate; - const unsigned int basis_size = additional_data.max_basis_size; + const unsigned int n_tmp_vectors = additional_data.max_n_tmp_vectors; // Generate an object where basis // vectors are stored. - typename internal::SolverGMRES::TmpVectors v (basis_size, this->memory); - typename internal::SolverGMRES::TmpVectors z (basis_size, this->memory); + typename SolverGMRES::TmpVectors v (n_tmp_vectors, this->memory); + typename SolverGMRES::TmpVectors z (n_tmp_vectors, this->memory); // number of the present iteration; this // number is not reset to zero upon a @@ -839,7 +805,7 @@ SolverFGMRES::solve ( // matrix used for the orthogonalization // process later - H.reinit(basis_size+1, basis_size); + H.reinit(n_tmp_vectors+1, n_tmp_vectors); // Vectors for projected system Vector projected_rhs; @@ -859,10 +825,10 @@ SolverFGMRES::solve ( == SolverControl::success) break; - H.reinit(basis_size+1, basis_size); + H.reinit(n_tmp_vectors+1, n_tmp_vectors); double a = beta; - for (unsigned int j=0;j::solve ( break; } } + // Update solution vector for (unsigned int j=0;j::solve ( } #endif + -- 2.39.5